drm/i915: Fix false-positive assert_rpm_wakelock_held in i915_pmic_bus_access_notifier v2
drivers/gpu/drm/i915/intel_uncore.c
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
cf9d2890 26#include "i915_vgpu.h"
907b28c5 27
264ec1a8 28#include <asm/iosf_mbi.h>
29#include <linux/pm_runtime.h>
30
83e33372 31#define FORCEWAKE_ACK_TIMEOUT_MS 50
6b07b6d2 32#define GT_FIFO_TIMEOUT_MS 10
907b28c5 33
75aa3f63 34#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
6af5d92f 35
36static const char * const forcewake_domain_names[] = {
37 "render",
38 "blitter",
39 "media",
40};
41
42const char *
48c1026a 43intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 44{
53abb679 45 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
46
47 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
48 return forcewake_domain_names[id];
49
50 WARN_ON(id);
51
52 return "unknown";
53}
54
05a2fb15 55static inline void
56fw_domain_reset(struct drm_i915_private *i915,
57 const struct intel_uncore_forcewake_domain *d)
907b28c5 58{
6e3955a5 59 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
60}
61
62static inline void
63fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
907b28c5 64{
65 d->wake_count++;
66 hrtimer_start_range_ns(&d->timer,
8b0e1953 67 NSEC_PER_MSEC,
68 NSEC_PER_MSEC,
69 HRTIMER_MODE_REL);
70}
71
72static inline int
73__wait_for_ack(const struct drm_i915_private *i915,
74 const struct intel_uncore_forcewake_domain *d,
75 const u32 ack,
76 const u32 value)
77{
78 return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
79 FORCEWAKE_ACK_TIMEOUT_MS);
80}
81
82static inline int
83wait_ack_clear(const struct drm_i915_private *i915,
84 const struct intel_uncore_forcewake_domain *d,
85 const u32 ack)
86{
87 return __wait_for_ack(i915, d, ack, 0);
88}
89
90static inline int
91wait_ack_set(const struct drm_i915_private *i915,
92 const struct intel_uncore_forcewake_domain *d,
93 const u32 ack)
94{
95 return __wait_for_ack(i915, d, ack, ack);
96}
97
05a2fb15 98static inline void
6e3955a5 99fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
577ac4bd 100 const struct intel_uncore_forcewake_domain *d)
907b28c5 101{
71306303 102 if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
103 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
104 intel_uncore_forcewake_domain_to_str(d->id));
105}
907b28c5 106
107enum ack_type {
108 ACK_CLEAR = 0,
109 ACK_SET
110};
111
112static int
113fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
114 const struct intel_uncore_forcewake_domain *d,
115 const enum ack_type type)
116{
117 const u32 ack_bit = FORCEWAKE_KERNEL;
118 const u32 value = type == ACK_SET ? ack_bit : 0;
119 unsigned int pass;
120 bool ack_detected;
121
122 /*
123 * There is a possibility of driver's wake request colliding
124 * with hardware's own wake requests and that can cause
125 * hardware to not deliver the driver's ack message.
126 *
127 * Use a fallback bit toggle to kick the gpu state machine
128 * in the hope that the original ack will be delivered along with
129 * the fallback ack.
130 *
131 * This workaround is described in HSDES #1604254524
132 */
133
134 pass = 1;
135 do {
136 wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
137
138 __raw_i915_write32(i915, d->reg_set,
139 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
140 /* Give gt some time to relax before the polling frenzy */
141 udelay(10 * pass);
142 wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
143
144 ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
145
146 __raw_i915_write32(i915, d->reg_set,
147 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
148 } while (!ack_detected && pass++ < 10);
149
150 DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
151 intel_uncore_forcewake_domain_to_str(d->id),
152 type == ACK_SET ? "set" : "clear",
153 __raw_i915_read32(i915, d->reg_ack),
154 pass);
155
156 return ack_detected ? 0 : -ETIMEDOUT;
157}
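/*
 * A note on the loop above: the fallback toggle is retried for at most 10
 * passes with udelay(10 * pass) between attempts, so the extra busy-wait is
 * bounded by roughly 10 + 20 + ... + 100 = 550 usec on top of the
 * FORCEWAKE_KERNEL_FALLBACK ack polls. The original FORCEWAKE_KERNEL ack is
 * re-sampled on every pass, and -ETIMEDOUT is returned only if it never
 * appears.
 */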
158
159static inline void
160fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
161 const struct intel_uncore_forcewake_domain *d)
162{
163 if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
164 return;
165
166 if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
167 fw_domain_wait_ack_clear(i915, d);
168}
169
05a2fb15 170static inline void
171fw_domain_get(struct drm_i915_private *i915,
172 const struct intel_uncore_forcewake_domain *d)
05a2fb15 173{
6e3955a5 174 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
05a2fb15 175}
907b28c5 176
05a2fb15 177static inline void
178fw_domain_wait_ack_set(const struct drm_i915_private *i915,
179 const struct intel_uncore_forcewake_domain *d)
05a2fb15 180{
71306303 181 if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
182 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
183 intel_uncore_forcewake_domain_to_str(d->id));
184}
907b28c5 185
186static inline void
187fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
188 const struct intel_uncore_forcewake_domain *d)
189{
190 if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
191 return;
192
193 if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
194 fw_domain_wait_ack_set(i915, d);
195}
196
05a2fb15 197static inline void
6e3955a5 198fw_domain_put(const struct drm_i915_private *i915,
577ac4bd 199 const struct intel_uncore_forcewake_domain *d)
05a2fb15 200{
6e3955a5 201 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
202}
203
05a2fb15 204static void
577ac4bd 205fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
907b28c5 206{
05a2fb15 207 struct intel_uncore_forcewake_domain *d;
d2dc94bc 208 unsigned int tmp;
907b28c5 209
210 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
211
212 for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
213 fw_domain_wait_ack_clear(i915, d);
214 fw_domain_get(i915, d);
05a2fb15 215 }
4e1176dd 216
d2dc94bc 217 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
218 fw_domain_wait_ack_set(i915, d);
219
220 i915->uncore.fw_domains_active |= fw_domains;
221}
222
223static void
224fw_domains_get_with_fallback(struct drm_i915_private *i915,
225 enum forcewake_domains fw_domains)
226{
227 struct intel_uncore_forcewake_domain *d;
228 unsigned int tmp;
229
230 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
231
232 for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
233 fw_domain_wait_ack_clear_fallback(i915, d);
234 fw_domain_get(i915, d);
235 }
236
237 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
238 fw_domain_wait_ack_set_fallback(i915, d);
b8473050 239
577ac4bd 240 i915->uncore.fw_domains_active |= fw_domains;
05a2fb15 241}
907b28c5 242
05a2fb15 243static void
577ac4bd 244fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
245{
246 struct intel_uncore_forcewake_domain *d;
247 unsigned int tmp;
248
249 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
907b28c5 250
0f966aaf 251 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
577ac4bd 252 fw_domain_put(i915, d);
b8473050 253
577ac4bd 254 i915->uncore.fw_domains_active &= ~fw_domains;
05a2fb15 255}
907b28c5 256
05a2fb15 257static void
258fw_domains_reset(struct drm_i915_private *i915,
259 enum forcewake_domains fw_domains)
260{
261 struct intel_uncore_forcewake_domain *d;
d2dc94bc 262 unsigned int tmp;
05a2fb15 263
d2dc94bc 264 if (!fw_domains)
3225b2f9 265 return;
f9b3927a 266
267 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
268
269 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
577ac4bd 270 fw_domain_reset(i915, d);
271}
272
273static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
274{
275 /* w/a for a sporadic read returning 0 by waiting for the GT
276 * thread to wake up.
277 */
278 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
279 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
280 DRM_ERROR("GT thread status wait timed out\n");
281}
282
283static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
48c1026a 284 enum forcewake_domains fw_domains)
285{
286 fw_domains_get(dev_priv, fw_domains);
907b28c5 287
05a2fb15 288 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
c549f738 289 __gen6_gt_wait_for_thread_c0(dev_priv);
290}
291
292static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
293{
294 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
295
296 return count & GT_FIFO_FREE_ENTRIES_MASK;
297}
298
6b07b6d2 299static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
907b28c5 300{
6b07b6d2 301 u32 n;
907b28c5 302
 303 /* On VLV, FIFO will be shared by both SW and HW,
 304 * so we need to read the FREE_ENTRIES every time */
2d1fe073 305 if (IS_VALLEYVIEW(dev_priv))
306 n = fifo_free_entries(dev_priv);
307 else
308 n = dev_priv->uncore.fifo_count;
309
310 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
311 if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
312 GT_FIFO_NUM_RESERVED_ENTRIES,
313 GT_FIFO_TIMEOUT_MS)) {
314 DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
315 return;
907b28c5 316 }
907b28c5 317 }
907b28c5 318
6b07b6d2 319 dev_priv->uncore.fifo_count = n - 1;
320}
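/*
 * The bookkeeping above keeps GT_FIFO_NUM_RESERVED_ENTRIES slots free for
 * the hardware: once the cached count falls to the reserve, GTFIFOCTL is
 * re-polled (for up to GT_FIFO_TIMEOUT_MS) before another posted write is
 * allowed, and the count is decremented per write. On Valleyview the FIFO
 * is shared with the hardware, so the cached value is ignored and the free
 * entries are re-read on every call.
 */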
321
322static enum hrtimer_restart
323intel_uncore_fw_release_timer(struct hrtimer *timer)
38cff0b1 324{
325 struct intel_uncore_forcewake_domain *domain =
326 container_of(timer, struct intel_uncore_forcewake_domain, timer);
327 struct drm_i915_private *dev_priv =
328 container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
b2cff0db 329 unsigned long irqflags;
38cff0b1 330
003342a5 331 assert_rpm_device_not_suspended(dev_priv);
38cff0b1 332
333 if (xchg(&domain->active, false))
334 return HRTIMER_RESTART;
335
003342a5 336 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
337 if (WARN_ON(domain->wake_count == 0))
338 domain->wake_count++;
339
b8473050 340 if (--domain->wake_count == 0)
003342a5 341 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
b2cff0db 342
003342a5 343 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
344
345 return HRTIMER_NORESTART;
346}
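/*
 * Auto-release path: register accesses that implicitly grabbed a domain
 * (see __force_wake_auto()) arm this ~1ms hrtimer via fw_domain_arm_timer()
 * instead of dropping the reference straight away. If domain->active was
 * set again before the timer fired, the timer restarts rather than
 * releasing; otherwise the wake_count taken on behalf of the access is
 * dropped under uncore.lock.
 */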
347
a5266db4 348/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
349static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
350 bool restore)
38cff0b1 351{
48c1026a 352 unsigned long irqflags;
b2cff0db 353 struct intel_uncore_forcewake_domain *domain;
48c1026a 354 int retry_count = 100;
003342a5 355 enum forcewake_domains fw, active_domains;
38cff0b1 356
357 iosf_mbi_assert_punit_acquired();
358
359 /* Hold uncore.lock across reset to prevent any register access
360 * with forcewake not set correctly. Wait until all pending
361 * timers are run before holding.
362 */
363 while (1) {
364 unsigned int tmp;
365
b2cff0db 366 active_domains = 0;
38cff0b1 367
d2dc94bc 368 for_each_fw_domain(domain, dev_priv, tmp) {
c9e0c6da 369 smp_store_mb(domain->active, false);
a57a4a67 370 if (hrtimer_cancel(&domain->timer) == 0)
b2cff0db 371 continue;
38cff0b1 372
a57a4a67 373 intel_uncore_fw_release_timer(&domain->timer);
b2cff0db 374 }
aec347ab 375
b2cff0db 376 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
b2ec142c 377
d2dc94bc 378 for_each_fw_domain(domain, dev_priv, tmp) {
a57a4a67 379 if (hrtimer_active(&domain->timer))
33c582c1 380 active_domains |= domain->mask;
b2cff0db 381 }
3123fcaf 382
383 if (active_domains == 0)
384 break;
aec347ab 385
386 if (--retry_count == 0) {
387 DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
388 break;
389 }
0294ae7b 390
391 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
392 cond_resched();
393 }
0294ae7b 394
395 WARN_ON(active_domains);
396
003342a5 397 fw = dev_priv->uncore.fw_domains_active;
398 if (fw)
399 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
ef46e0d2 400
cb3600db 401 fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
38cff0b1 402
0294ae7b 403 if (restore) { /* If reset with a user forcewake, try to restore */
404 if (fw)
405 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
406
dc97997a 407 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
0294ae7b 408 dev_priv->uncore.fifo_count =
c32e3788 409 fifo_free_entries(dev_priv);
410 }
411
b2cff0db 412 if (!restore)
59bad947 413 assert_forcewakes_inactive(dev_priv);
b2cff0db 414
0294ae7b 415 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
416}
417
418static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
419{
420 const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
421 const unsigned int sets[4] = { 1, 1, 2, 2 };
422 const u32 cap = dev_priv->edram_cap;
423
424 return EDRAM_NUM_BANKS(cap) *
425 ways[EDRAM_WAYS_IDX(cap)] *
426 sets[EDRAM_SETS_IDX(cap)] *
427 1024 * 1024;
428}
429
3accaf7e 430u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
907b28c5 431{
432 if (!HAS_EDRAM(dev_priv))
433 return 0;
434
435 /* The needed capability bits for size calculation
 436 * are not there on pre-gen9, so return 128MB always.
3accaf7e 437 */
438 if (INTEL_GEN(dev_priv) < 9)
439 return 128 * 1024 * 1024;
3accaf7e 440
c02e85a0 441 return gen9_edram_size(dev_priv);
3accaf7e 442}
907b28c5 443
444static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
445{
446 if (IS_HASWELL(dev_priv) ||
447 IS_BROADWELL(dev_priv) ||
448 INTEL_GEN(dev_priv) >= 9) {
449 dev_priv->edram_cap = __raw_i915_read32(dev_priv,
450 HSW_EDRAM_CAP);
451
452 /* NB: We can't write IDICR yet because we do not have gt funcs
18ce3994 453 * set up */
454 } else {
455 dev_priv->edram_cap = 0;
18ce3994 456 }
457
458 if (HAS_EDRAM(dev_priv))
459 DRM_INFO("Found %lluMB of eDRAM\n",
460 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
461}
462
8a47eb19 463static bool
8ac3e1bb 464fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
465{
466 u32 dbg;
467
468 dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
469 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
470 return false;
471
472 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
473
474 return true;
475}
476
477static bool
478vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
479{
480 u32 cer;
481
482 cer = __raw_i915_read32(dev_priv, CLAIM_ER);
483 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
484 return false;
485
486 __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
487
488 return true;
489}
490
491static bool
492gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
493{
494 u32 fifodbg;
495
496 fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
497
498 if (unlikely(fifodbg)) {
 499 DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
500 __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
501 }
502
503 return fifodbg;
504}
505
506static bool
507check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
508{
509 bool ret = false;
510
8ac3e1bb 511 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
a338908c 512 ret |= fpga_check_for_unclaimed_mmio(dev_priv);
513
514 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
515 ret |= vlv_check_for_unclaimed_mmio(dev_priv);
516
517 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
518 ret |= gen6_check_for_fifo_debug(dev_priv);
8ac3e1bb 519
a338908c 520 return ret;
521}
522
dc97997a 523static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
524 bool restore_forcewake)
525{
526 /* clear out unclaimed reg detection bit */
527 if (check_for_unclaimed_mmio(dev_priv))
528 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
907b28c5 529
a04f90a3 530 /* WaDisableShadowRegForCpd:chv */
dc97997a 531 if (IS_CHERRYVIEW(dev_priv)) {
532 __raw_i915_write32(dev_priv, GTFIFOCTL,
533 __raw_i915_read32(dev_priv, GTFIFOCTL) |
534 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
535 GT_FIFO_CTL_RC6_POLICY_STALL);
536 }
537
a5266db4 538 iosf_mbi_punit_acquire();
dc97997a 539 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
a5266db4 540 iosf_mbi_punit_release();
541}
542
68f60946 543void intel_uncore_suspend(struct drm_i915_private *dev_priv)
ed493883 544{
545 iosf_mbi_punit_acquire();
546 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
264ec1a8 547 &dev_priv->uncore.pmic_bus_access_nb);
68f60946 548 intel_uncore_forcewake_reset(dev_priv, false);
a5266db4 549 iosf_mbi_punit_release();
550}
551
552void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
553{
554 __intel_uncore_early_sanitize(dev_priv, true);
555 iosf_mbi_register_pmic_bus_access_notifier(
556 &dev_priv->uncore.pmic_bus_access_nb);
dc97997a 557 i915_check_and_clear_faults(dev_priv);
558}
559
dc97997a 560void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
521198a2 561{
562 i915_modparams.enable_rc6 =
563 sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
274008e8 564
907b28c5 565 /* BIOS often leaves RC6 enabled, but disable it for hw init */
54b4f68f 566 intel_sanitize_gt_powersave(dev_priv);
567}
568
569static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
570 enum forcewake_domains fw_domains)
571{
572 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 573 unsigned int tmp;
a6111f7b 574
575 fw_domains &= dev_priv->uncore.fw_domains;
576
577 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
578 if (domain->wake_count++) {
33c582c1 579 fw_domains &= ~domain->mask;
580 domain->active = true;
581 }
582 }
a6111f7b 583
b8473050 584 if (fw_domains)
585 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
586}
587
588/**
589 * intel_uncore_forcewake_get - grab forcewake domain references
590 * @dev_priv: i915 device instance
591 * @fw_domains: forcewake domains to get reference on
592 *
 593 * This function can be used to get GT's forcewake domain references.
 594 * Normal register access will handle the forcewake domains automatically.
 595 * However, if some sequence requires the GT to not power down a particular
 596 * forcewake domain, this function should be called at the beginning of the
 597 * sequence. The reference should subsequently be dropped by a symmetric
 598 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 599 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
907b28c5 600 */
59bad947 601void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
48c1026a 602 enum forcewake_domains fw_domains)
603{
604 unsigned long irqflags;
605
606 if (!dev_priv->uncore.funcs.force_wake_get)
607 return;
608
c9b8846a 609 assert_rpm_wakelock_held(dev_priv);
c8c8fb33 610
6daccb0b 611 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
a6111f7b 612 __intel_uncore_forcewake_get(dev_priv, fw_domains);
613 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
614}
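/*
 * Illustrative usage (not taken from this file): a caller that must keep
 * the render well powered across a multi-register sequence would bracket
 * it with a matching get/put pair, e.g.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... raw register sequence ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * FORCEWAKE_ALL is the usual choice when the exact domains are not known.
 */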
615
616/**
617 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
618 * @dev_priv: i915 device instance
619 *
620 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
621 * the GT powerwell and in the process disable our debugging for the
622 * duration of userspace's bypass.
623 */
624void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
625{
626 spin_lock_irq(&dev_priv->uncore.lock);
627 if (!dev_priv->uncore.user_forcewake.count++) {
628 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
629
630 /* Save and disable mmio debugging for the user bypass */
631 dev_priv->uncore.user_forcewake.saved_mmio_check =
632 dev_priv->uncore.unclaimed_mmio_check;
633 dev_priv->uncore.user_forcewake.saved_mmio_debug =
4f044a88 634 i915_modparams.mmio_debug;
635
636 dev_priv->uncore.unclaimed_mmio_check = 0;
4f044a88 637 i915_modparams.mmio_debug = 0;
638 }
639 spin_unlock_irq(&dev_priv->uncore.lock);
640}
641
642/**
643 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
644 * @dev_priv: i915 device instance
645 *
646 * This function complements intel_uncore_forcewake_user_get() and releases
647 * the GT powerwell taken on behalf of the userspace bypass.
648 */
649void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
650{
651 spin_lock_irq(&dev_priv->uncore.lock);
652 if (!--dev_priv->uncore.user_forcewake.count) {
653 if (intel_uncore_unclaimed_mmio(dev_priv))
654 dev_info(dev_priv->drm.dev,
655 "Invalid mmio detected during user access\n");
656
657 dev_priv->uncore.unclaimed_mmio_check =
658 dev_priv->uncore.user_forcewake.saved_mmio_check;
4f044a88 659 i915_modparams.mmio_debug =
660 dev_priv->uncore.user_forcewake.saved_mmio_debug;
661
662 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
663 }
664 spin_unlock_irq(&dev_priv->uncore.lock);
665}
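/*
 * The user_get/user_put pair above exists for a userspace bypass, typically
 * a debugfs interface that holds all domains awake while a tool pokes
 * registers directly (the exact consumer lives outside this file, so that
 * is an assumption here). MMIO debugging is parked for the duration because
 * direct userspace accesses would otherwise trip the unclaimed-register
 * checks.
 */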
666
59bad947 667/**
a6111f7b 668 * intel_uncore_forcewake_get__locked - grab forcewake domain references
59bad947 669 * @dev_priv: i915 device instance
a6111f7b 670 * @fw_domains: forcewake domains to get reference on
59bad947 671 *
672 * See intel_uncore_forcewake_get(). This variant places the onus
673 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 674 */
675void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
676 enum forcewake_domains fw_domains)
677{
67520415 678 lockdep_assert_held(&dev_priv->uncore.lock);
679
680 if (!dev_priv->uncore.funcs.force_wake_get)
681 return;
682
683 __intel_uncore_forcewake_get(dev_priv, fw_domains);
684}
685
686static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
687 enum forcewake_domains fw_domains)
907b28c5 688{
b2cff0db 689 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 690 unsigned int tmp;
907b28c5 691
692 fw_domains &= dev_priv->uncore.fw_domains;
693
d2dc94bc 694 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
695 if (WARN_ON(domain->wake_count == 0))
696 continue;
697
698 if (--domain->wake_count) {
699 domain->active = true;
b2cff0db 700 continue;
c9e0c6da 701 }
b2cff0db 702
05a2fb15 703 fw_domain_arm_timer(domain);
aec347ab 704 }
a6111f7b 705}
dc9fb09c 706
707/**
708 * intel_uncore_forcewake_put - release a forcewake domain reference
709 * @dev_priv: i915 device instance
710 * @fw_domains: forcewake domains to put references
711 *
712 * This function drops the device-level forcewakes for specified
713 * domains obtained by intel_uncore_forcewake_get().
714 */
715void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
716 enum forcewake_domains fw_domains)
717{
718 unsigned long irqflags;
719
720 if (!dev_priv->uncore.funcs.force_wake_put)
721 return;
722
723 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
724 __intel_uncore_forcewake_put(dev_priv, fw_domains);
725 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
726}
727
728/**
 729 * intel_uncore_forcewake_put__locked - release forcewake domain references
 730 * @dev_priv: i915 device instance
 731 * @fw_domains: forcewake domains to put references on
732 *
733 * See intel_uncore_forcewake_put(). This variant places the onus
734 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
735 */
736void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
737 enum forcewake_domains fw_domains)
738{
67520415 739 lockdep_assert_held(&dev_priv->uncore.lock);
740
741 if (!dev_priv->uncore.funcs.force_wake_put)
742 return;
743
744 __intel_uncore_forcewake_put(dev_priv, fw_domains);
745}
746
59bad947 747void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
748{
749 if (!dev_priv->uncore.funcs.force_wake_get)
750 return;
751
752 WARN(dev_priv->uncore.fw_domains_active,
753 "Expected all fw_domains to be inactive, but %08x are still on\n",
754 dev_priv->uncore.fw_domains_active);
755}
756
757void assert_forcewakes_active(struct drm_i915_private *dev_priv,
758 enum forcewake_domains fw_domains)
759{
760 if (!dev_priv->uncore.funcs.force_wake_get)
761 return;
762
763 assert_rpm_wakelock_held(dev_priv);
764
765 fw_domains &= dev_priv->uncore.fw_domains;
766 WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
767 "Expected %08x fw_domains to be active, but %08x are off\n",
768 fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
769}
770
907b28c5 771/* We give fast paths for the really cool registers */
40181697 772#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
907b28c5 773
774#define __gen6_reg_read_fw_domains(offset) \
775({ \
776 enum forcewake_domains __fwd; \
777 if (NEEDS_FORCE_WAKE(offset)) \
778 __fwd = FORCEWAKE_RENDER; \
779 else \
780 __fwd = 0; \
781 __fwd; \
782})
783
9480dbf0 784static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 785{
786 if (offset < entry->start)
787 return -1;
788 else if (offset > entry->end)
789 return 1;
790 else
791 return 0;
792}
793
794/* Copied and "macroized" from lib/bsearch.c */
795#define BSEARCH(key, base, num, cmp) ({ \
796 unsigned int start__ = 0, end__ = (num); \
797 typeof(base) result__ = NULL; \
798 while (start__ < end__) { \
799 unsigned int mid__ = start__ + (end__ - start__) / 2; \
800 int ret__ = (cmp)((key), (base) + mid__); \
801 if (ret__ < 0) { \
802 end__ = mid__; \
803 } else if (ret__ > 0) { \
804 start__ = mid__ + 1; \
805 } else { \
806 result__ = (base) + mid__; \
807 break; \
808 } \
809 } \
810 result__; \
811})
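/*
 * BSEARCH() is used below by find_fw_domain() and is_gen8_shadowed() for an
 * O(log n) lookup, e.g.
 *
 *	entry = BSEARCH(offset,
 *			dev_priv->uncore.fw_domains_table,
 *			dev_priv->uncore.fw_domains_table_entries,
 *			fw_range_cmp);
 *
 * which is why the forcewake range and shadowed-register tables below must
 * stay sorted by offset.
 */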
812
9fc1117c 813static enum forcewake_domains
15157970 814find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
9fc1117c 815{
9480dbf0 816 const struct intel_forcewake_range *entry;
9fc1117c 817
818 entry = BSEARCH(offset,
819 dev_priv->uncore.fw_domains_table,
820 dev_priv->uncore.fw_domains_table_entries,
91e630b9 821 fw_range_cmp);
38fb6a40 822
823 if (!entry)
824 return 0;
825
826 WARN(entry->domains & ~dev_priv->uncore.fw_domains,
827 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
828 entry->domains & ~dev_priv->uncore.fw_domains, offset);
829
830 return entry->domains;
831}
832
833#define GEN_FW_RANGE(s, e, d) \
834 { .start = (s), .end = (e), .domains = (d) }
1938e59a 835
895833bd 836#define HAS_FWTABLE(dev_priv) \
3d16ca58 837 (INTEL_GEN(dev_priv) >= 9 || \
838 IS_CHERRYVIEW(dev_priv) || \
839 IS_VALLEYVIEW(dev_priv))
840
b0081239 841/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
842static const struct intel_forcewake_range __vlv_fw_ranges[] = {
843 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
844 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
845 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
846 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
847 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
b0081239 848 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
849 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
850};
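/*
 * Worked example for the table above: a VLV access at offset 0x12080 falls
 * in the 0x12000-0x13fff range and therefore needs FORCEWAKE_MEDIA, while
 * an offset in a hole (say 0x4800) gets no entry back from find_fw_domain()
 * and is performed without waking any well.
 */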
1938e59a 851
895833bd 852#define __fwtable_reg_read_fw_domains(offset) \
853({ \
854 enum forcewake_domains __fwd = 0; \
0dd356bb 855 if (NEEDS_FORCE_WAKE((offset))) \
15157970 856 __fwd = find_fw_domain(dev_priv, offset); \
857 __fwd; \
858})
859
47188574 860/* *Must* be sorted by offset! See intel_shadow_table_check(). */
6863b76c 861static const i915_reg_t gen8_shadowed_regs[] = {
862 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
863 GEN6_RPNSWREQ, /* 0xA008 */
864 GEN6_RC_VIDEO_FREQ, /* 0xA00C */
865 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
866 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
867 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
868 /* TODO: Other registers are not yet used */
869};
870
9480dbf0 871static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
5a659383 872{
9480dbf0 873 u32 offset = i915_mmio_reg_offset(*reg);
5a659383 874
9480dbf0 875 if (key < offset)
5a659383 876 return -1;
9480dbf0 877 else if (key > offset)
878 return 1;
879 else
880 return 0;
881}
882
883static bool is_gen8_shadowed(u32 offset)
884{
9480dbf0 885 const i915_reg_t *regs = gen8_shadowed_regs;
5a659383 886
887 return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
888 mmio_reg_cmp);
889}
890
891#define __gen8_reg_write_fw_domains(offset) \
892({ \
893 enum forcewake_domains __fwd; \
894 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
895 __fwd = FORCEWAKE_RENDER; \
896 else \
897 __fwd = 0; \
898 __fwd; \
899})
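/*
 * Writes matching gen8_shadowed_regs[] skip forcewake even below 0x40000.
 * The working assumption (not spelled out in this file) is that these
 * registers, ring TAIL pointers and RPS requests among them, are shadowed
 * by the hardware and picked up when the GT next wakes, so the write itself
 * does not need the power well up.
 */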
900
b0081239 901/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
902static const struct intel_forcewake_range __chv_fw_ranges[] = {
903 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 904 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 905 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 906 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 907 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 908 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 909 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
910 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
911 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 912 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
913 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
914 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
915 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
916 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
917 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
918 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 919};
38fb6a40 920
22d48c55 921#define __fwtable_reg_write_fw_domains(offset) \
922({ \
923 enum forcewake_domains __fwd = 0; \
0dd356bb 924 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
15157970 925 __fwd = find_fw_domain(dev_priv, offset); \
926 __fwd; \
927})
928
b0081239 929/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c 930static const struct intel_forcewake_range __gen9_fw_ranges[] = {
0dd356bb 931 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
932 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
933 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
0dd356bb 934 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
9fc1117c 935 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
0dd356bb 936 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
9fc1117c 937 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
0dd356bb 938 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
b0081239 939 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 940 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
0dd356bb 941 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
9fc1117c 942 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
0dd356bb 943 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
b0081239 944 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
0dd356bb 945 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
9fc1117c 946 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
0dd356bb 947 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
9fc1117c 948 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
0dd356bb 949 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
b0081239 950 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
78424c92 951 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
9fc1117c 952 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
0dd356bb 953 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
b0081239 954 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
0dd356bb 955 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
9fc1117c 956 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
0dd356bb 957 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
9fc1117c 958 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
0dd356bb 959 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
b0081239 960 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
0dd356bb 961 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
962 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
963};
6863b76c 964
965static void
966ilk_dummy_write(struct drm_i915_private *dev_priv)
967{
968 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
969 * the chip from rc6 before touching it for real. MI_MODE is masked,
970 * hence harmless to write 0 into. */
6af5d92f 971 __raw_i915_write32(dev_priv, MI_MODE, 0);
972}
973
974static void
975__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
976 const i915_reg_t reg,
977 const bool read,
978 const bool before)
907b28c5 979{
980 if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
981 "Unclaimed %s register 0x%x\n",
982 read ? "read from" : "write to",
4bd0a25d 983 i915_mmio_reg_offset(reg)))
984 /* Only report the first N failures */
985 i915_modparams.mmio_debug--;
986}
987
988static inline void
989unclaimed_reg_debug(struct drm_i915_private *dev_priv,
990 const i915_reg_t reg,
991 const bool read,
992 const bool before)
993{
4f044a88 994 if (likely(!i915_modparams.mmio_debug))
995 return;
996
997 __unclaimed_reg_debug(dev_priv, reg, read, before);
998}
999
51f67885 1000#define GEN2_READ_HEADER(x) \
5d738795 1001 u##x val = 0; \
da5827c3 1002 assert_rpm_wakelock_held(dev_priv);
5d738795 1003
51f67885 1004#define GEN2_READ_FOOTER \
1005 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1006 return val
1007
51f67885 1008#define __gen2_read(x) \
0b274481 1009static u##x \
f0f59a00 1010gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 1011 GEN2_READ_HEADER(x); \
3967018e 1012 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 1013 GEN2_READ_FOOTER; \
1014}
1015
1016#define __gen5_read(x) \
1017static u##x \
f0f59a00 1018gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 1019 GEN2_READ_HEADER(x); \
1020 ilk_dummy_write(dev_priv); \
1021 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 1022 GEN2_READ_FOOTER; \
1023}
1024
1025__gen5_read(8)
1026__gen5_read(16)
1027__gen5_read(32)
1028__gen5_read(64)
1029__gen2_read(8)
1030__gen2_read(16)
1031__gen2_read(32)
1032__gen2_read(64)
1033
1034#undef __gen5_read
1035#undef __gen2_read
1036
1037#undef GEN2_READ_FOOTER
1038#undef GEN2_READ_HEADER
1039
1040#define GEN6_READ_HEADER(x) \
f0f59a00 1041 u32 offset = i915_mmio_reg_offset(reg); \
1042 unsigned long irqflags; \
1043 u##x val = 0; \
da5827c3 1044 assert_rpm_wakelock_held(dev_priv); \
1045 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1046 unclaimed_reg_debug(dev_priv, reg, true, true)
1047
1048#define GEN6_READ_FOOTER \
9c053501 1049 unclaimed_reg_debug(dev_priv, reg, true, false); \
1050 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
1051 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1052 return val
1053
1054static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
1055 enum forcewake_domains fw_domains)
1056{
1057 struct intel_uncore_forcewake_domain *domain;
1058 unsigned int tmp;
1059
1060 GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
b2cff0db 1061
d2dc94bc 1062 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
1063 fw_domain_arm_timer(domain);
1064
1065 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
1066}
1067
1068static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
1069 enum forcewake_domains fw_domains)
1070{
1071 if (WARN_ON(!fw_domains))
1072 return;
1073
1074 /* Turn on all requested but inactive supported forcewake domains. */
1075 fw_domains &= dev_priv->uncore.fw_domains;
1076 fw_domains &= ~dev_priv->uncore.fw_domains_active;
b2cff0db 1077
1078 if (fw_domains)
1079 ___force_wake_auto(dev_priv, fw_domains);
1080}
1081
ccfceda2 1082#define __gen_read(func, x) \
3967018e 1083static u##x \
ccfceda2 1084func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6863b76c 1085 enum forcewake_domains fw_engine; \
51f67885 1086 GEN6_READ_HEADER(x); \
ccfceda2 1087 fw_engine = __##func##_reg_read_fw_domains(offset); \
6a42d0f4 1088 if (fw_engine) \
b208ba8e 1089 __force_wake_auto(dev_priv, fw_engine); \
6fe72865 1090 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 1091 GEN6_READ_FOOTER; \
940aece4 1092}
1093#define __gen6_read(x) __gen_read(gen6, x)
1094#define __fwtable_read(x) __gen_read(fwtable, x)
940aece4 1095
1096__fwtable_read(8)
1097__fwtable_read(16)
1098__fwtable_read(32)
1099__fwtable_read(64)
1100__gen6_read(8)
1101__gen6_read(16)
1102__gen6_read(32)
1103__gen6_read(64)
3967018e 1104
6044c4a3 1105#undef __fwtable_read
3967018e 1106#undef __gen6_read
1107#undef GEN6_READ_FOOTER
1108#undef GEN6_READ_HEADER
5d738795 1109
51f67885 1110#define GEN2_WRITE_HEADER \
5d738795 1111 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1112 assert_rpm_wakelock_held(dev_priv); \
907b28c5 1113
51f67885 1114#define GEN2_WRITE_FOOTER
0d965301 1115
51f67885 1116#define __gen2_write(x) \
0b274481 1117static void \
f0f59a00 1118gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1119 GEN2_WRITE_HEADER; \
4032ef43 1120 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1121 GEN2_WRITE_FOOTER; \
1122}
1123
1124#define __gen5_write(x) \
1125static void \
f0f59a00 1126gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1127 GEN2_WRITE_HEADER; \
1128 ilk_dummy_write(dev_priv); \
1129 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1130 GEN2_WRITE_FOOTER; \
1131}
1132
1133__gen5_write(8)
1134__gen5_write(16)
1135__gen5_write(32)
1136__gen2_write(8)
1137__gen2_write(16)
1138__gen2_write(32)
1139
1140#undef __gen5_write
1141#undef __gen2_write
1142
1143#undef GEN2_WRITE_FOOTER
1144#undef GEN2_WRITE_HEADER
1145
1146#define GEN6_WRITE_HEADER \
f0f59a00 1147 u32 offset = i915_mmio_reg_offset(reg); \
1148 unsigned long irqflags; \
1149 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1150 assert_rpm_wakelock_held(dev_priv); \
1151 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1152 unclaimed_reg_debug(dev_priv, reg, false, true)
1153
1154#define GEN6_WRITE_FOOTER \
9c053501 1155 unclaimed_reg_debug(dev_priv, reg, false, false); \
1156 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1157
1158#define __gen6_write(x) \
1159static void \
f0f59a00 1160gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1161 GEN6_WRITE_HEADER; \
1162 if (NEEDS_FORCE_WAKE(offset)) \
1163 __gen6_gt_wait_for_fifo(dev_priv); \
4032ef43 1164 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1165 GEN6_WRITE_FOOTER; \
1166}
1167
ccfceda2 1168#define __gen_write(func, x) \
ab2aa47e 1169static void \
ccfceda2 1170func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 1171 enum forcewake_domains fw_engine; \
51f67885 1172 GEN6_WRITE_HEADER; \
ccfceda2 1173 fw_engine = __##func##_reg_write_fw_domains(offset); \
6a42d0f4 1174 if (fw_engine) \
b208ba8e 1175 __force_wake_auto(dev_priv, fw_engine); \
1938e59a 1176 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1177 GEN6_WRITE_FOOTER; \
1938e59a 1178}
1179#define __gen8_write(x) __gen_write(gen8, x)
1180#define __fwtable_write(x) __gen_write(fwtable, x)
1938e59a 1181
1182__fwtable_write(8)
1183__fwtable_write(16)
1184__fwtable_write(32)
1185__gen8_write(8)
1186__gen8_write(16)
1187__gen8_write(32)
1188__gen6_write(8)
1189__gen6_write(16)
1190__gen6_write(32)
4032ef43 1191
22d48c55 1192#undef __fwtable_write
ab2aa47e 1193#undef __gen8_write
4032ef43 1194#undef __gen6_write
1195#undef GEN6_WRITE_FOOTER
1196#undef GEN6_WRITE_HEADER
907b28c5 1197
0757ac8f 1198#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
43d942a7 1199do { \
1200 (i915)->uncore.funcs.mmio_writeb = x##_write8; \
1201 (i915)->uncore.funcs.mmio_writew = x##_write16; \
1202 (i915)->uncore.funcs.mmio_writel = x##_write32; \
1203} while (0)
1204
0757ac8f 1205#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
43d942a7 1206do { \
1207 (i915)->uncore.funcs.mmio_readb = x##_read8; \
1208 (i915)->uncore.funcs.mmio_readw = x##_read16; \
1209 (i915)->uncore.funcs.mmio_readl = x##_read32; \
1210 (i915)->uncore.funcs.mmio_readq = x##_read64; \
1211} while (0)
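/*
 * Usage, as in intel_uncore_init() below: ASSIGN_READ_MMIO_VFUNCS(dev_priv,
 * fwtable) wires uncore.funcs.mmio_read{b,w,l,q} up to fwtable_read{8,16,32,64},
 * and ASSIGN_WRITE_MMIO_VFUNCS() does the same for the write side, so the
 * per-platform accessor choice is made once at init time rather than per
 * register access.
 */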
1212
1213
1214static void fw_domain_init(struct drm_i915_private *dev_priv,
48c1026a 1215 enum forcewake_domain_id domain_id,
1216 i915_reg_t reg_set,
1217 i915_reg_t reg_ack)
1218{
1219 struct intel_uncore_forcewake_domain *d;
1220
1221 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1222 return;
1223
1224 d = &dev_priv->uncore.fw_domain[domain_id];
1225
1226 WARN_ON(d->wake_count);
1227
1228 WARN_ON(!i915_mmio_reg_valid(reg_set));
1229 WARN_ON(!i915_mmio_reg_valid(reg_ack));
1230
1231 d->wake_count = 0;
1232 d->reg_set = reg_set;
1233 d->reg_ack = reg_ack;
1234
1235 d->id = domain_id;
1236
1237 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1238 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1239 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1240
d2dc94bc 1241 d->mask = BIT(domain_id);
33c582c1 1242
1243 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1244 d->timer.function = intel_uncore_fw_release_timer;
05a2fb15 1245
6e3955a5 1246 dev_priv->uncore.fw_domains |= BIT(domain_id);
f9b3927a 1247
577ac4bd 1248 fw_domain_reset(dev_priv, d);
1249}
1250
dc97997a 1251static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
0b274481 1252{
e3b1895f 1253 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1254 return;
1255
1256 if (IS_GEN6(dev_priv)) {
1257 dev_priv->uncore.fw_reset = 0;
1258 dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1259 dev_priv->uncore.fw_clear = 0;
1260 } else {
1261 /* WaRsClearFWBitsAtReset:bdw,skl */
1262 dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1263 dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1264 dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1265 }
1266
3d16ca58 1267 if (INTEL_GEN(dev_priv) >= 9) {
1268 dev_priv->uncore.funcs.force_wake_get =
1269 fw_domains_get_with_fallback;
1270 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1271 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1272 FORCEWAKE_RENDER_GEN9,
1273 FORCEWAKE_ACK_RENDER_GEN9);
1274 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1275 FORCEWAKE_BLITTER_GEN9,
1276 FORCEWAKE_ACK_BLITTER_GEN9);
1277 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1278 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
dc97997a 1279 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
05a2fb15 1280 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
a338908c 1281 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1282 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1283 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1284 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1285 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
dc97997a 1286 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1287 dev_priv->uncore.funcs.force_wake_get =
1288 fw_domains_get_with_thread_status;
a338908c 1289 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1290 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1291 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
dc97997a 1292 } else if (IS_IVYBRIDGE(dev_priv)) {
1293 u32 ecobus;
1294
1295 /* IVB configs may use multi-threaded forcewake */
1296
1297 /* A small trick here - if the bios hasn't configured
1298 * MT forcewake, and if the device is in RC6, then
1299 * force_wake_mt_get will not wake the device and the
1300 * ECOBUS read will return zero. Which will be
1301 * (correctly) interpreted by the test below as MT
1302 * forcewake being disabled.
1303 */
1304 dev_priv->uncore.funcs.force_wake_get =
1305 fw_domains_get_with_thread_status;
a338908c 1306 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
05a2fb15 1307
1308 /* We need to init first for ECOBUS access and then
1309 * determine later if we want to reinit, in case of MT access is
6ea2556f
MK
1310 * not working. In this stage we don't know which flavour this
1311 * ivb is, so it is better to reset also the gen6 fw registers
1312 * before the ecobus check.
f9b3927a 1313 */
1314
1315 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1316 __raw_posting_read(dev_priv, ECOBUS);
1317
1318 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1319 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
f9b3927a 1320
556ab7a6 1321 spin_lock_irq(&dev_priv->uncore.lock);
bd527504 1322 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
0b274481 1323 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
a338908c 1324 fw_domains_put(dev_priv, FORCEWAKE_RENDER);
556ab7a6 1325 spin_unlock_irq(&dev_priv->uncore.lock);
0b274481 1326
05a2fb15 1327 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1328 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1329 DRM_INFO("when using vblank-synced partial screen updates.\n");
1330 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1331 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1332 }
dc97997a 1333 } else if (IS_GEN6(dev_priv)) {
0b274481 1334 dev_priv->uncore.funcs.force_wake_get =
05a2fb15 1335 fw_domains_get_with_thread_status;
a338908c 1336 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1337 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1338 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1339 }
1340
1341 /* All future platforms are expected to require complex power gating */
1342 WARN_ON(dev_priv->uncore.fw_domains == 0);
1343}
1344
1345#define ASSIGN_FW_DOMAINS_TABLE(d) \
1346{ \
1347 dev_priv->uncore.fw_domains_table = \
1348 (struct intel_forcewake_range *)(d); \
1349 dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1350}
1351
1352static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1353 unsigned long action, void *data)
1354{
1355 struct drm_i915_private *dev_priv = container_of(nb,
1356 struct drm_i915_private, uncore.pmic_bus_access_nb);
1357
1358 switch (action) {
1359 case MBI_PMIC_BUS_ACCESS_BEGIN:
1360 /*
1361 * forcewake all now to make sure that we don't need to do a
1362 * forcewake later which on systems where this notifier gets
 1363 * called requires the punit to access the shared pmic i2c
1364 * bus, which will be busy after this notification, leading to:
1365 * "render: timed out waiting for forcewake ack request."
1366 * errors.
1367 *
1368 * The notifier is unregistered during intel_runtime_suspend(),
1369 * so it's ok to access the HW here without holding a RPM
1370 * wake reference -> disable wakeref asserts for the time of
1371 * the access.
264ec1a8 1372 */
ce30560c 1373 disable_rpm_wakeref_asserts(dev_priv);
264ec1a8 1374 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ce30560c 1375 enable_rpm_wakeref_asserts(dev_priv);
1376 break;
1377 case MBI_PMIC_BUS_ACCESS_END:
1378 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1379 break;
1380 }
1381
1382 return NOTIFY_OK;
1383}
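/*
 * Note on the v2 fix in the commit subject: the notifier may run while no
 * RPM wakeref is held (it is unregistered before runtime suspend), so the
 * disable/enable_rpm_wakeref_asserts() bracket around the forcewake get
 * above is what prevents a false-positive assert_rpm_wakelock_held()
 * warning.
 */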
1384
dc97997a 1385void intel_uncore_init(struct drm_i915_private *dev_priv)
f9b3927a 1386{
dc97997a 1387 i915_check_vgpu(dev_priv);
cf9d2890 1388
3accaf7e 1389 intel_uncore_edram_detect(dev_priv);
1390 intel_uncore_fw_domains_init(dev_priv);
1391 __intel_uncore_early_sanitize(dev_priv, false);
0b274481 1392
75714940 1393 dev_priv->uncore.unclaimed_mmio_check = 1;
1394 dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1395 i915_pmic_bus_access_notifier;
75714940 1396
e3b1895f 1397 if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1398 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
1399 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
e3b1895f 1400 } else if (IS_GEN5(dev_priv)) {
1401 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
1402 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
e3b1895f 1403 } else if (IS_GEN(dev_priv, 6, 7)) {
0757ac8f 1404 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
1405
1406 if (IS_VALLEYVIEW(dev_priv)) {
1407 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
0757ac8f 1408 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
e3b1895f 1409 } else {
0757ac8f 1410 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
85ee17eb 1411 }
e3b1895f 1412 } else if (IS_GEN8(dev_priv)) {
dc97997a 1413 if (IS_CHERRYVIEW(dev_priv)) {
15157970 1414 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1415 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1416 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1417
1418 } else {
1419 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
1420 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1938e59a 1421 }
1422 } else {
1423 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1424 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1425 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
3967018e 1426 }
ed493883 1427
1428 iosf_mbi_register_pmic_bus_access_notifier(
1429 &dev_priv->uncore.pmic_bus_access_nb);
1430}
1431
dc97997a 1432void intel_uncore_fini(struct drm_i915_private *dev_priv)
0b274481 1433{
0b274481 1434 /* Paranoia: make sure we have disabled everything before we exit. */
dc97997a 1435 intel_uncore_sanitize(dev_priv);
1436
1437 iosf_mbi_punit_acquire();
1438 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1439 &dev_priv->uncore.pmic_bus_access_nb);
dc97997a 1440 intel_uncore_forcewake_reset(dev_priv, false);
a5266db4 1441 iosf_mbi_punit_release();
1442}
1443
1444static const struct reg_whitelist {
1445 i915_reg_t offset_ldw;
1446 i915_reg_t offset_udw;
1447 u16 gen_mask;
1448 u8 size;
1449} reg_read_whitelist[] = { {
1450 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1451 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1452 .gen_mask = INTEL_GEN_MASK(4, 10),
1453 .size = 8
1454} };
1455
1456int i915_reg_read_ioctl(struct drm_device *dev,
1457 void *data, struct drm_file *file)
1458{
fac5e23e 1459 struct drm_i915_private *dev_priv = to_i915(dev);
907b28c5 1460 struct drm_i915_reg_read *reg = data;
1461 struct reg_whitelist const *entry;
1462 unsigned int flags;
1463 int remain;
1464 int ret = 0;
1465
1466 entry = reg_read_whitelist;
1467 remain = ARRAY_SIZE(reg_read_whitelist);
1468 while (remain) {
1469 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
1470
1471 GEM_BUG_ON(!is_power_of_2(entry->size));
1472 GEM_BUG_ON(entry->size > 8);
1473 GEM_BUG_ON(entry_offset & (entry->size - 1));
1474
1475 if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
1476 entry_offset == (reg->offset & -entry->size))
907b28c5 1477 break;
1478 entry++;
1479 remain--;
1480 }
1481
3fd3a6ff 1482 if (!remain)
1483 return -EINVAL;
1484
3fd3a6ff 1485 flags = reg->offset & (entry->size - 1);
648a9bc5 1486
cf67c70f 1487 intel_runtime_pm_get(dev_priv);
1488 if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
1489 reg->val = I915_READ64_2x32(entry->offset_ldw,
1490 entry->offset_udw);
1491 else if (entry->size == 8 && flags == 0)
1492 reg->val = I915_READ64(entry->offset_ldw);
1493 else if (entry->size == 4 && flags == 0)
1494 reg->val = I915_READ(entry->offset_ldw);
1495 else if (entry->size == 2 && flags == 0)
1496 reg->val = I915_READ16(entry->offset_ldw);
1497 else if (entry->size == 1 && flags == 0)
1498 reg->val = I915_READ8(entry->offset_ldw);
1499 else
cf67c70f 1500 ret = -EINVAL;
cf67c70f 1501 intel_runtime_pm_put(dev_priv);
3fd3a6ff 1502
cf67c70f 1503 return ret;
1504}
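/*
 * For reference, userspace selects the register through reg->offset; the
 * low bits (offset & (entry->size - 1)) act as flags, and the only flag
 * currently accepted is I915_REG_READ_8B_WA, which requests the 2x32 read
 * of a 64-bit register instead of a single 64-bit MMIO read.
 */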
1505
1506static void gen3_stop_engine(struct intel_engine_cs *engine)
1507{
1508 struct drm_i915_private *dev_priv = engine->i915;
1509 const u32 base = engine->mmio_base;
1510 const i915_reg_t mode = RING_MI_MODE(base);
1511
1512 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1513 if (intel_wait_for_register_fw(dev_priv,
1514 mode,
1515 MODE_IDLE,
1516 MODE_IDLE,
1517 500))
1518 DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1519 engine->name);
1520
1521 I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
1522
1523 I915_WRITE_FW(RING_HEAD(base), 0);
1524 I915_WRITE_FW(RING_TAIL(base), 0);
1525
1526 /* The ring must be empty before it is disabled */
1527 I915_WRITE_FW(RING_CTL(base), 0);
1528
1529 /* Check acts as a post */
1530 if (I915_READ_FW(RING_HEAD(base)) != 0)
1531 DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1532 engine->name);
1533}
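/*
 * Stop sequence used above: set STOP_RING in MI_MODE and wait for MODE_IDLE,
 * make the ring appear empty by writing HEAD to the current TAIL before
 * zeroing both, then clear RING_CTL. The final RING_HEAD read doubles as a
 * posting read to confirm the ring actually parked.
 */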
1534
1535static void i915_stop_engines(struct drm_i915_private *dev_priv,
1536 unsigned engine_mask)
1537{
1538 struct intel_engine_cs *engine;
1539 enum intel_engine_id id;
1540
1541 if (INTEL_GEN(dev_priv) < 3)
1542 return;
1543
1544 for_each_engine_masked(engine, dev_priv, engine_mask, id)
1545 gen3_stop_engine(engine);
1546}
1547
9593a657 1548static bool i915_reset_complete(struct pci_dev *pdev)
1549{
1550 u8 gdrst;
9593a657 1551
dc97997a 1552 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1553 return (gdrst & GRDOM_RESET_STATUS) == 0;
1554}
1555
dc97997a 1556static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
907b28c5 1557{
91c8a326 1558 struct pci_dev *pdev = dev_priv->drm.pdev;
dc97997a 1559
73bbf6bd 1560 /* assert reset for at least 20 usec */
dc97997a 1561 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
9593a657 1562 usleep_range(50, 200);
dc97997a 1563 pci_write_config_byte(pdev, I915_GDRST, 0);
907b28c5 1564
dc97997a 1565 return wait_for(i915_reset_complete(pdev), 500);
1566}
1567
9593a657 1568static bool g4x_reset_complete(struct pci_dev *pdev)
1569{
1570 u8 gdrst;
9593a657 1571
dc97997a 1572 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1573 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1574}
1575
dc97997a 1576static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
408d4b9e 1577{
91c8a326 1578 struct pci_dev *pdev = dev_priv->drm.pdev;
9593a657 1579
1580 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1581 return wait_for(g4x_reset_complete(pdev), 500);
1582}
1583
dc97997a 1584static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
fa4f53c4 1585{
91c8a326 1586 struct pci_dev *pdev = dev_priv->drm.pdev;
1587 int ret;
1588
fa4f53c4 1589 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1590 I915_WRITE(VDECCLK_GATE_D,
1591 I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1592 POSTING_READ(VDECCLK_GATE_D);
1593
dc97997a 1594 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1595 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
dc97997a 1596 ret = wait_for(g4x_reset_complete(pdev), 500);
1597 if (ret) {
1598 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
44e1e7ba 1599 goto out;
9593a657 1600 }
fa4f53c4 1601
1602 pci_write_config_byte(pdev, I915_GDRST,
1603 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1604 ret = wait_for(g4x_reset_complete(pdev), 500);
1605 if (ret) {
1606 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1607 goto out;
1608 }
fa4f53c4 1609
9593a657 1610out:
dc97997a 1611 pci_write_config_byte(pdev, I915_GDRST, 0);
1612
1613 I915_WRITE(VDECCLK_GATE_D,
1614 I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1615 POSTING_READ(VDECCLK_GATE_D);
1616
9593a657 1617 return ret;
1618}
1619
1620static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1621 unsigned engine_mask)
907b28c5 1622{
1623 int ret;
1624
9593a657 1625 I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1626 ret = intel_wait_for_register(dev_priv,
1627 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1628 500);
1629 if (ret) {
1630 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1631 goto out;
1632 }
907b28c5 1633
9593a657 1634 I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1635 ret = intel_wait_for_register(dev_priv,
1636 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1637 500);
1638 if (ret) {
1639 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1640 goto out;
1641 }
9aa7250f 1642
9593a657 1643out:
c039b7f2 1644 I915_WRITE(ILK_GDSR, 0);
1645 POSTING_READ(ILK_GDSR);
1646 return ret;
1647}
1648
1649/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1650static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1651 u32 hw_domain_mask)
907b28c5 1652{
1653 int err;
1654
1655 /* GEN6_GDRST is not in the gt power well, no need to check
1656 * for fifo space for the write or forcewake the chip for
1657 * the read
1658 */
ee4b6faf 1659 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
907b28c5 1660
a3662830 1661 /* Wait for the device to ack the reset requests */
9593a657 1662 err = intel_wait_for_register_fw(dev_priv,
1663 GEN6_GDRST, hw_domain_mask, 0,
1664 500);
1665 if (err)
1666 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
1667 hw_domain_mask);
1668
1669 return err;
1670}
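/*
 * Minimal illustrative call, not new driver code: a full-chip reset on
 * gen6+ is just the GEN6_GRDOM_FULL bit written through this helper, with
 * forcewake already held by the caller as intel_gpu_reset() below does:
 *
 *	err = gen6_hw_domain_reset(dev_priv, GEN6_GRDOM_FULL);
 */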
1671
1672/**
1673 * gen6_reset_engines - reset individual engines
dc97997a 1674 * @dev_priv: i915 device
1675 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1676 *
1677 * This function will reset the individual engines that are set in engine_mask.
1678 * If ALL_ENGINES is provided as the mask, a full global domain reset will be issued.
1679 *
1680 * Note: It is the responsibility of the caller to handle the difference between
1681 * requesting a full domain reset versus resetting all available individual engines.
1682 *
1683 * Returns 0 on success, nonzero on error.
1684 */
1685static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1686 unsigned engine_mask)
ee4b6faf 1687{
1688 struct intel_engine_cs *engine;
1689 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1690 [RCS] = GEN6_GRDOM_RENDER,
1691 [BCS] = GEN6_GRDOM_BLT,
1692 [VCS] = GEN6_GRDOM_MEDIA,
1693 [VCS2] = GEN8_GRDOM_MEDIA2,
1694 [VECS] = GEN6_GRDOM_VECS,
1695 };
1696 u32 hw_mask;
1697
1698 if (engine_mask == ALL_ENGINES) {
1699 hw_mask = GEN6_GRDOM_FULL;
1700 } else {
1701 unsigned int tmp;
1702
ee4b6faf 1703 hw_mask = 0;
bafb0fce 1704 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1705 hw_mask |= hw_engine_mask[engine->id];
1706 }
1707
4055dc75 1708 return gen6_hw_domain_reset(dev_priv, hw_mask);
1709}
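/*
 * Illustrative sketch only: resetting just the render and blitter engines
 * passes their BIT(engine->id) flags as @engine_mask, which the table above
 * translates into GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT before the write to
 * GEN6_GDRST:
 *
 *	err = gen6_reset_engines(dev_priv, BIT(RCS) | BIT(BCS));
 */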
1710
1758b90e 1711/**
1d1a9774 1712 * __intel_wait_for_register_fw - wait until register matches expected state
1713 * @dev_priv: the i915 device
1714 * @reg: the register to read
1715 * @mask: mask to apply to register value
1716 * @value: expected value
1717 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
1718 * @slow_timeout_ms: slow timeout in milliseconds
1719 * @out_value: optional placeholder to hold the register value
1720 *
1721 * This routine waits until the target register @reg contains the expected
1722 * @value after applying the @mask, i.e. it waits until ::
1723 *
1724 *     (I915_READ_FW(reg) & mask) == value
1725 *
1d1a9774 1726 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
6976e74b 1727 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
84d84cb7 1728 * must not be larger than 20,000 microseconds.
1729 *
1730 * Note that this routine assumes the caller holds forcewake asserted; it is
1731 * not suitable for very long waits. See intel_wait_for_register() if you
1732 * wish to wait without holding forcewake for the duration (i.e. you expect
1733 * the wait to be slow).
1734 *
1735 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1736 */
1737int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1738 i915_reg_t reg,
1739 u32 mask,
1740 u32 value,
1741 unsigned int fast_timeout_us,
1742 unsigned int slow_timeout_ms,
1d1a9774 1743 u32 *out_value)
1758b90e 1744{
ff26ffa8 1745 u32 uninitialized_var(reg_value);
1746#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
1747 int ret;
1748
6976e74b 1749 /* Catch any overuse of this function */
1750 might_sleep_if(slow_timeout_ms);
1751 GEM_BUG_ON(fast_timeout_us > 20000);
6976e74b 1752
1753 ret = -ETIMEDOUT;
1754 if (fast_timeout_us && fast_timeout_us <= 20000)
1d1a9774 1755 ret = _wait_for_atomic(done, fast_timeout_us, 0);
ff26ffa8 1756 if (ret && slow_timeout_ms)
1d1a9774 1757 ret = wait_for(done, slow_timeout_ms);
84d84cb7 1758
1759 if (out_value)
1760 *out_value = reg_value;
84d84cb7 1761
1762 return ret;
1763#undef done
1764}
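/*
 * A usage sketch with placeholder names (EXAMPLE_STATUS_REG and
 * EXAMPLE_BUSY_BIT are not real definitions): spin atomically for up to
 * 100us, with forcewake already held, for a busy bit to clear, capturing
 * the last observed value for debug:
 *
 *	u32 last;
 *	int err;
 *
 *	err = __intel_wait_for_register_fw(dev_priv, EXAMPLE_STATUS_REG,
 *					   EXAMPLE_BUSY_BIT, 0,
 *					   100, 0, &last);
 *	if (err)
 *		DRM_DEBUG_DRIVER("busy bit stuck, last value 0x%08x\n", last);
 */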
1765
1766/**
1767 * intel_wait_for_register - wait until register matches expected state
1768 * @dev_priv: the i915 device
1769 * @reg: the register to read
1770 * @mask: mask to apply to register value
1771 * @value: expected value
1772 * @timeout_ms: timeout in milliseconds
1773 *
1774 * This routine waits until the target register @reg contains the expected
1775 * @value after applying the @mask, i.e. it waits until ::
1776 *
1777 *     (I915_READ(reg) & mask) == value
1778 *
1779 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1780 *
1781 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1782 */
1783int intel_wait_for_register(struct drm_i915_private *dev_priv,
1784 i915_reg_t reg,
1785 u32 mask,
1786 u32 value,
1787 unsigned int timeout_ms)
7fd2d269 1788{
1789 unsigned fw =
1790 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1791 int ret;
1792
1793 might_sleep();
1794
1795 spin_lock_irq(&dev_priv->uncore.lock);
1796 intel_uncore_forcewake_get__locked(dev_priv, fw);
1797
1798 ret = __intel_wait_for_register_fw(dev_priv,
1799 reg, mask, value,
1800 2, 0, NULL);
1801
1802 intel_uncore_forcewake_put__locked(dev_priv, fw);
1803 spin_unlock_irq(&dev_priv->uncore.lock);
1804
1805 if (ret)
1806 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1807 timeout_ms);
1808
1809 return ret;
1810}
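/*
 * The sleeping-context variant is used exactly as ironlake_do_reset()
 * above does for the GDSR handshake; forcewake and the uncore lock are
 * handled internally for the fast path, then the slow path polls without
 * forcewake:
 *
 *	ret = intel_wait_for_register(dev_priv,
 *				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
 *				      500);
 */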
1811
e3895af8 1812static int gen8_reset_engine_start(struct intel_engine_cs *engine)
d431440c 1813{
c033666a 1814 struct drm_i915_private *dev_priv = engine->i915;
d431440c 1815 int ret;
1816
1817 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1818 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1819
1820 ret = intel_wait_for_register_fw(dev_priv,
1821 RING_RESET_CTL(engine->mmio_base),
1822 RESET_CTL_READY_TO_RESET,
1823 RESET_CTL_READY_TO_RESET,
1824 700);
1825 if (ret)
1826 DRM_ERROR("%s: reset request timeout\n", engine->name);
1827
1828 return ret;
1829}
1830
e3895af8 1831static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
d431440c 1832{
c033666a 1833 struct drm_i915_private *dev_priv = engine->i915;
1834
1835 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1836 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1837}
1838
1839static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1840 unsigned engine_mask)
7fd2d269 1841{
7fd2d269 1842 struct intel_engine_cs *engine;
bafb0fce 1843 unsigned int tmp;
7fd2d269 1844
bafb0fce 1845 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
e3895af8 1846 if (gen8_reset_engine_start(engine))
7fd2d269 1847 goto not_ready;
7fd2d269 1848
dc97997a 1849 return gen6_reset_engines(dev_priv, engine_mask);
1850
1851not_ready:
bafb0fce 1852 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
e3895af8 1853 gen8_reset_engine_cancel(engine);
1854
1855 return -EIO;
1856}
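/*
 * Illustrative only: a single-engine reset on gen8+ goes through the same
 * entry point; the READY_TO_RESET handshake above is requested for every
 * engine in the mask before the gen6 domain reset is issued, and cancelled
 * again if any engine fails to report ready:
 *
 *	err = gen8_reset_engines(dev_priv, BIT(VCS));
 */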
1857
1858typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1859
1860static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
907b28c5 1861{
4f044a88 1862 if (!i915_modparams.reset)
1863 return NULL;
1864
dc97997a 1865 if (INTEL_INFO(dev_priv)->gen >= 8)
ee4b6faf 1866 return gen8_reset_engines;
dc97997a 1867 else if (INTEL_INFO(dev_priv)->gen >= 6)
ee4b6faf 1868 return gen6_reset_engines;
dc97997a 1869 else if (IS_GEN5(dev_priv))
49e4d842 1870 return ironlake_do_reset;
dc97997a 1871 else if (IS_G4X(dev_priv))
49e4d842 1872 return g4x_do_reset;
73f67aa8 1873 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
49e4d842 1874 return g33_do_reset;
dc97997a 1875 else if (INTEL_INFO(dev_priv)->gen >= 3)
49e4d842 1876 return i915_do_reset;
542c184f 1877 else
1878 return NULL;
1879}
1880
dc97997a 1881int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
49e4d842 1882{
5896a5c8 1883 reset_func reset = intel_get_gpu_reset(dev_priv);
9593a657 1884 int retry;
99106bc1 1885 int ret;
49e4d842 1886
1887 might_sleep();
1888
1889	/* If the power well sleeps during the reset, the reset
1890	 * request may be dropped and never complete (causing -EIO).
1891	 */
1892 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9593a657 1893 for (retry = 0; retry < 3; retry++) {
1894
1895		/* We stop the engines first, otherwise we might get a failed
1896		 * reset and a dead gpu (on elk). Even a gpu as modern as kbl
1897		 * can suffer a system hang if a batchbuffer is still progressing
1898		 * when the reset is issued, regardless of the READY_TO_RESET ack.
1899		 * Thus we assume it is best to stop the engines on all gens
1900		 * where we have a gpu reset.
1901		 *
1902		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
1903		 *
1904		 * FIXME: Wa for more modern gens needs to be validated
1905		 */
1906 i915_stop_engines(dev_priv, engine_mask);
1907
1908 ret = -ENODEV;
1909 if (reset)
1910 ret = reset(dev_priv, engine_mask);
1911 if (ret != -ETIMEDOUT)
1912 break;
1913
1914 cond_resched();
1915 }
1916 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1917
1918 return ret;
1919}
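/*
 * Caller-side sketch only, assuming the reset serialisation done by the
 * higher-level i915_reset()/i915_reset_engine() paths elsewhere in the
 * driver is already in place; re-initialising engine state afterwards
 * remains the caller's job:
 *
 *	if (intel_has_gpu_reset(dev_priv))
 *		err = intel_gpu_reset(dev_priv, ALL_ENGINES);
 */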
1920
dc97997a 1921bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
49e4d842 1922{
dc97997a 1923 return intel_get_gpu_reset(dev_priv) != NULL;
1924}
1925
1926bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
1927{
1928 return (dev_priv->info.has_reset_engine &&
4f044a88 1929 i915_modparams.reset >= 2);
1930}
1931
cb20a3c0 1932int intel_reset_guc(struct drm_i915_private *dev_priv)
1933{
1934 int ret;
6b332fa2 1935
1a3d1898 1936 if (!HAS_GUC(dev_priv))
1937 return -EINVAL;
1938
1939 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6b332fa2 1940 ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1941 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1942
1943 return ret;
1944}
1945
fc97618b 1946bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
907b28c5 1947{
fc97618b 1948 return check_for_unclaimed_mmio(dev_priv);
907b28c5 1949}
75714940 1950
bc3b9346 1951bool
1952intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1953{
4f044a88 1954 if (unlikely(i915_modparams.mmio_debug ||
75714940 1955 dev_priv->uncore.unclaimed_mmio_check <= 0))
bc3b9346 1956 return false;
1957
1958 if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1959 DRM_DEBUG("Unclaimed register detected, "
1960 "enabling oneshot unclaimed register reporting. "
1961 "Please use i915.mmio_debug=N for more information.\n");
4f044a88 1962 i915_modparams.mmio_debug++;
75714940 1963 dev_priv->uncore.unclaimed_mmio_check--;
bc3b9346 1964 return true;
75714940 1965 }
1966
1967 return false;
75714940 1968}
1969
1970static enum forcewake_domains
1971intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1972 i915_reg_t reg)
1973{
895833bd 1974 u32 offset = i915_mmio_reg_offset(reg);
1975 enum forcewake_domains fw_domains;
1976
1977 if (HAS_FWTABLE(dev_priv)) {
1978 fw_domains = __fwtable_reg_read_fw_domains(offset);
1979 } else if (INTEL_GEN(dev_priv) >= 6) {
1980 fw_domains = __gen6_reg_read_fw_domains(offset);
1981 } else {
1982 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1983 fw_domains = 0;
1984 }
1985
1986 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1987
1988 return fw_domains;
1989}
1990
1991static enum forcewake_domains
1992intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1993 i915_reg_t reg)
1994{
22d48c55 1995 u32 offset = i915_mmio_reg_offset(reg);
1996 enum forcewake_domains fw_domains;
1997
1998 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1999 fw_domains = __fwtable_reg_write_fw_domains(offset);
2000 } else if (IS_GEN8(dev_priv)) {
2001 fw_domains = __gen8_reg_write_fw_domains(offset);
2002 } else if (IS_GEN(dev_priv, 6, 7)) {
3756685a 2003 fw_domains = FORCEWAKE_RENDER;
2004 } else {
2005 WARN_ON(!IS_GEN(dev_priv, 2, 5));
2006 fw_domains = 0;
2007 }
2008
2009 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
2010
2011 return fw_domains;
2012}
2013
2014/**
2015 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2016 * a register
2017 * @dev_priv: pointer to struct drm_i915_private
2018 * @reg: register in question
2019 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2020 *
2021 * Returns a set of forcewake domains required to be taken with, for example,
2022 * intel_uncore_forcewake_get for the specified register to be accessible in the
2023 * specified mode (read, write or read/write) with raw mmio accessors.
2024 *
2025 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2026 * callers to do FIFO management on their own or risk losing writes.
2027 */
2028enum forcewake_domains
2029intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
2030 i915_reg_t reg, unsigned int op)
2031{
2032 enum forcewake_domains fw_domains = 0;
2033
2034 WARN_ON(!op);
2035
2036 if (intel_vgpu_active(dev_priv))
2037 return 0;
2038
2039 if (op & FW_REG_READ)
2040 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
2041
2042 if (op & FW_REG_WRITE)
2043 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
2044
2045 return fw_domains;
2046}
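/*
 * A minimal sketch of the intended raw-access pattern (EXAMPLE_REG is a
 * placeholder and error handling is elided); it mirrors the locked fast
 * path used by intel_wait_for_register() above:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, EXAMPLE_REG,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw);
 *	I915_WRITE_FW(EXAMPLE_REG, I915_READ_FW(EXAMPLE_REG) | BIT(0));
 *	intel_uncore_forcewake_put__locked(dev_priv, fw);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */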
2047
2048#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0757ac8f 2049#include "selftests/mock_uncore.c"
2050#include "selftests/intel_uncore.c"
2051#endif