drm/i915: Add more control to wait_for routines
drivers/gpu/drm/i915/intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(i915, d, ack, 0);
}

static inline int
wait_ack_set(const struct drm_i915_private *i915,
	     const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(i915, d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
				 const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The driver's wake request can collide with the hardware's own
	 * wake requests, and that collision can cause the hardware to not
	 * deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524
	 */

	pass = 1;
	do {
		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 __raw_i915_read32(i915, d->reg_ack),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
				  const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
		fw_domain_wait_ack_clear(i915, d);
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack_set(const struct drm_i915_private *i915,
		       const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
				const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
		fw_domain_wait_ack_set(i915, d);
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}

static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct drm_i915_private *i915,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear_fallback(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set_fallback(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}

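/*
 * Worked example of the formula above (hypothetical cap encoding, for
 * illustration only): 8 banks, a ways index of 3 (16 ways) and a sets
 * index of 0 (1 set) gives 8 * 16 * 1 * 1MB = 128MB of eDRAM.
 */
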
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The capability bits needed for the size calculation are not
	 * present before gen9, so always report 128MB there.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we don't have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, so @fw_domains would then be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
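
/*
 * Minimal usage sketch (not taken from this file): hold the render
 * domain awake across a raw register sequence, then drop the reference.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... a sequence of I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */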

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
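
/*
 * For example, on gen6 a read at offset 0x2030 (the render ring tail)
 * maps to FORCEWAKE_RENDER here, while any offset at or above 0x40000
 * needs no forcewake at all.
 */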

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
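
/*
 * BSEARCH() evaluates to a pointer to the matching element, or NULL if
 * nothing compares equal; e.g. (a sketch mirroring find_fw_domain() below):
 *
 *	entry = BSEARCH(offset, table, num_entries, fw_range_cmp);
 *
 * returns the range containing @offset, provided the table is sorted.
 */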

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
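
/*
 * For instance, a write to RING_TAIL(RENDER_RING_BASE) (0x2030) hits the
 * shadowed list above and takes no forcewake, whereas a write to an
 * unshadowed register below 0x40000 still grabs FORCEWAKE_RENDER.
 */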

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_fallback;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which
		 * flavour this ivb is, so it is better to also reset the
		 * gen6 fw registers before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * Forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on the systems where this notifier
		 * gets called requires the punit to access the shared pmic
		 * i2c bus; that bus will be busy after this notification,
		 * leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
	iosf_mbi_punit_release();
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 10),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	intel_runtime_pm_get(dev_priv);
	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
		reg->val = I915_READ64_2x32(entry->offset_ldw,
					    entry->offset_udw);
	else if (entry->size == 8 && flags == 0)
		reg->val = I915_READ64(entry->offset_ldw);
	else if (entry->size == 4 && flags == 0)
		reg->val = I915_READ(entry->offset_ldw);
	else if (entry->size == 2 && flags == 0)
		reg->val = I915_READ16(entry->offset_ldw);
	else if (entry->size == 1 && flags == 0)
		reg->val = I915_READ8(entry->offset_ldw);
	else
		ret = -EINVAL;
	intel_runtime_pm_put(dev_priv);

	return ret;
}
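
/*
 * Userspace reaches this through DRM_IOCTL_I915_REG_READ; a hedged
 * sketch (assuming an open drm fd and libdrm's drmIoctl()):
 *
 *	struct drm_i915_reg_read rd = {
 *		.offset = 0x2358, // RING_TIMESTAMP(RENDER_RING_BASE)
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd) == 0)
 *		printf("render timestamp: 0x%llx\n",
 *		       (unsigned long long)rd.val);
 */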

static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register_fw(dev_priv,
				       mode,
				       MODE_IDLE,
				       MODE_IDLE,
				       500))
		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
				 engine->name);

	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));

	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_FW(RING_CTL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)) != 0)
		DRM_DEBUG_DRIVER("%s: ring head not parked\n",
				 engine->name);
}

static void i915_stop_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_GEN(dev_priv) < 3)
		return;

	for_each_engine_masked(engine, dev_priv, engine_mask, id)
		gen3_stop_engine(engine);
}

static bool i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	usleep_range(50, 200);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	I915_WRITE(ILK_GDSR, 0);
	POSTING_READ(ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	int err;

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = intel_wait_for_register_fw(dev_priv,
					 GEN6_GDRST, hw_domain_mask, 0,
					 500);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as the mask, a full global domain reset will be
 * issued.
 *
 * Note: It is the responsibility of the caller to handle the difference
 * between asking for a full domain reset versus a reset of all available
 * individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}

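/*
 * Illustrative sketch (editor's addition): the caller-side distinction the
 * note above refers to. Passing ALL_ENGINES selects GEN6_GRDOM_FULL, while
 * OR-ing together individual engine flags resets each engine domain
 * separately, even if every engine ends up in the mask. The helper name is
 * hypothetical.
 */
static int __maybe_unused
example_reset_engines_individually(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	for_each_engine(engine, dev_priv, id)
		mask |= BIT(id); /* engine flags, not GRDOM bits */

	/* Unlike ALL_ENGINES, this never requests GEN6_GRDOM_FULL */
	return gen6_reset_engines(dev_priv, mask);
}
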
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

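/*
 * Illustrative sketch (editor's addition, hypothetical helper name): a
 * tight, atomic-safe poll using only the fast timeout, with forcewake
 * assumed to be already held by the caller, as in the reset handshakes
 * below.
 */
static int __maybe_unused
example_wait_for_ring_idle_fw(struct drm_i915_private *dev_priv,
			      struct intel_engine_cs *engine)
{
	/* Busy-wait up to 100us; slow_timeout_ms == 0 keeps it atomic-safe */
	return __intel_wait_for_register_fw(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE, MODE_IDLE,
					    100, 0, NULL);
}
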
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct drm_i915_private *dev_priv,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

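/*
 * Illustrative sketch (editor's addition, hypothetical helper name): the
 * sleeping variant takes the required forcewake domains itself, so it suits
 * long waits where holding forcewake for the whole duration would be
 * wasteful. The register and timeouts are placeholders.
 */
static int __maybe_unused
example_wait_for_reset_done(struct drm_i915_private *dev_priv)
{
	/* 2us of busy-waiting, then sleep-and-poll for up to 500ms */
	return __intel_wait_for_register(dev_priv, GEN6_GDRST,
					 GEN6_GRDOM_FULL, 0,
					 2, 500, NULL);
}
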
static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915_modparams.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset = intel_get_gpu_reset(dev_priv);
	int retry;
	int ret;

	might_sleep();

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {
		/* We stop the engines first, otherwise we might get a
		 * failed reset and a dead gpu (on elk). Even GPUs as
		 * modern as kbl can suffer a system hang if a batchbuffer
		 * is still executing when the reset is issued, regardless
		 * of the READY_TO_RESET ack. It therefore seems best to
		 * stop the engines on all gens where we have a gpu reset.
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		i915_stop_engines(dev_priv, engine_mask);

		ret = -ENODEV;
		if (reset)
			ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

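/*
 * Illustrative sketch (editor's addition, hypothetical helper name): a
 * typical caller checks for reset support first, then falls back from a
 * single-engine reset to the full-chip reset when the lighter hammer fails.
 * intel_has_gpu_reset() is assumed to be declared in a header.
 */
static int __maybe_unused
example_reset_render_engine(struct drm_i915_private *dev_priv)
{
	if (!intel_has_gpu_reset(dev_priv))
		return -ENODEV;

	if (intel_gpu_reset(dev_priv, BIT(RCS)) == 0)
		return 0;

	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}
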
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
		i915_modparams.reset >= 2);
}

int intel_reset_guc(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915_modparams.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915_modparams.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken, for example with
 * intel_uncore_forcewake_get(), for the specified register to be accessible
 * in the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
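
/*
 * Illustrative sketch (editor's addition, hypothetical helper name): pairing
 * intel_uncore_forcewake_for_reg() with the __locked get/put and the raw
 * accessors, the same pattern __intel_wait_for_register() uses above.
 */
static u32 __maybe_unused
example_raw_read(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}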

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif