drivers/gpu/drm/i915/intel_uncore.c
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
cf9d2890 26#include "i915_vgpu.h"
907b28c5 27
264ec1a8 28#include <asm/iosf_mbi.h>
29#include <linux/pm_runtime.h>
30
83e33372 31#define FORCEWAKE_ACK_TIMEOUT_MS 50
6b07b6d2 32#define GT_FIFO_TIMEOUT_MS 10
907b28c5 33
75aa3f63 34#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
6af5d92f 35
36static const char * const forcewake_domain_names[] = {
37 "render",
38 "blitter",
39 "media",
40};
41
42const char *
48c1026a 43intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 44{
53abb679 45 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
46
47 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
48 return forcewake_domain_names[id];
49
50 WARN_ON(id);
51
52 return "unknown";
53}
54
05a2fb15 55static inline void
56fw_domain_reset(struct drm_i915_private *i915,
57 const struct intel_uncore_forcewake_domain *d)
907b28c5 58{
6e3955a5 59 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
60}
61
62static inline void
63fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
907b28c5 64{
65 d->wake_count++;
66 hrtimer_start_range_ns(&d->timer,
8b0e1953 67 NSEC_PER_MSEC,
68 NSEC_PER_MSEC,
69 HRTIMER_MODE_REL);
70}
71
05a2fb15 72static inline void
6e3955a5 73fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
577ac4bd 74 const struct intel_uncore_forcewake_domain *d)
907b28c5 75{
577ac4bd 76 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
05a2fb15 77 FORCEWAKE_KERNEL) == 0,
907b28c5 78 FORCEWAKE_ACK_TIMEOUT_MS))
79 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
80 intel_uncore_forcewake_domain_to_str(d->id));
81}
907b28c5 82
05a2fb15 83static inline void
84fw_domain_get(struct drm_i915_private *i915,
85 const struct intel_uncore_forcewake_domain *d)
05a2fb15 86{
6e3955a5 87 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
05a2fb15 88}
907b28c5 89
05a2fb15 90static inline void
6e3955a5 91fw_domain_wait_ack(const struct drm_i915_private *i915,
577ac4bd 92 const struct intel_uncore_forcewake_domain *d)
05a2fb15 93{
577ac4bd 94 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
05a2fb15 95 FORCEWAKE_KERNEL),
907b28c5 96 FORCEWAKE_ACK_TIMEOUT_MS))
97 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
98 intel_uncore_forcewake_domain_to_str(d->id));
99}
907b28c5 100
05a2fb15 101static inline void
6e3955a5 102fw_domain_put(const struct drm_i915_private *i915,
577ac4bd 103 const struct intel_uncore_forcewake_domain *d)
05a2fb15 104{
6e3955a5 105 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
106}
107
05a2fb15 108static void
577ac4bd 109fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
907b28c5 110{
05a2fb15 111 struct intel_uncore_forcewake_domain *d;
d2dc94bc 112 unsigned int tmp;
907b28c5 113
114 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
115
116 for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
117 fw_domain_wait_ack_clear(i915, d);
118 fw_domain_get(i915, d);
05a2fb15 119 }
4e1176dd 120
d2dc94bc 121 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
577ac4bd 122 fw_domain_wait_ack(i915, d);
b8473050 123
577ac4bd 124 i915->uncore.fw_domains_active |= fw_domains;
05a2fb15 125}
907b28c5 126
05a2fb15 127static void
577ac4bd 128fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
129{
130 struct intel_uncore_forcewake_domain *d;
131 unsigned int tmp;
132
133 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
907b28c5 134
0f966aaf 135 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
577ac4bd 136 fw_domain_put(i915, d);
b8473050 137
577ac4bd 138 i915->uncore.fw_domains_active &= ~fw_domains;
05a2fb15 139}
907b28c5 140
05a2fb15 141static void
142fw_domains_reset(struct drm_i915_private *i915,
143 enum forcewake_domains fw_domains)
144{
145 struct intel_uncore_forcewake_domain *d;
d2dc94bc 146 unsigned int tmp;
05a2fb15 147
d2dc94bc 148 if (!fw_domains)
3225b2f9 149 return;
f9b3927a 150
151 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
152
153 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
577ac4bd 154 fw_domain_reset(i915, d);
155}
156
157static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
158{
159 /* w/a for a sporadic read returning 0 by waiting for the GT
160 * thread to wake up.
161 */
162 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
163 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
164 DRM_ERROR("GT thread status wait timed out\n");
165}
166
167static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
48c1026a 168 enum forcewake_domains fw_domains)
169{
170 fw_domains_get(dev_priv, fw_domains);
907b28c5 171
05a2fb15 172 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
c549f738 173 __gen6_gt_wait_for_thread_c0(dev_priv);
174}
175
176static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
177{
178 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
179
180 return count & GT_FIFO_FREE_ENTRIES_MASK;
181}
182
6b07b6d2 183static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
907b28c5 184{
6b07b6d2 185 u32 n;
907b28c5 186
187 /* On VLV, FIFO will be shared by both SW and HW.
188 * So, we need to read the FREE_ENTRIES every time */
2d1fe073 189 if (IS_VALLEYVIEW(dev_priv))
190 n = fifo_free_entries(dev_priv);
191 else
192 n = dev_priv->uncore.fifo_count;
193
194 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
195 if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
196 GT_FIFO_NUM_RESERVED_ENTRIES,
197 GT_FIFO_TIMEOUT_MS)) {
198 DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
199 return;
907b28c5 200 }
907b28c5 201 }
907b28c5 202
6b07b6d2 203 dev_priv->uncore.fifo_count = n - 1;
204}
205
206static enum hrtimer_restart
207intel_uncore_fw_release_timer(struct hrtimer *timer)
38cff0b1 208{
209 struct intel_uncore_forcewake_domain *domain =
210 container_of(timer, struct intel_uncore_forcewake_domain, timer);
211 struct drm_i915_private *dev_priv =
212 container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
b2cff0db 213 unsigned long irqflags;
38cff0b1 214
003342a5 215 assert_rpm_device_not_suspended(dev_priv);
38cff0b1 216
003342a5 217 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
218 if (WARN_ON(domain->wake_count == 0))
219 domain->wake_count++;
220
b8473050 221 if (--domain->wake_count == 0)
003342a5 222 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
b2cff0db 223
003342a5 224 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
225
226 return HRTIMER_NORESTART;
227}
228
229static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
230 bool restore)
38cff0b1 231{
48c1026a 232 unsigned long irqflags;
b2cff0db 233 struct intel_uncore_forcewake_domain *domain;
48c1026a 234 int retry_count = 100;
003342a5 235 enum forcewake_domains fw, active_domains;
38cff0b1 236
237 /* Hold uncore.lock across reset to prevent any register access
238 * with forcewake not set correctly. Wait until all pending
239 * timers are run before holding.
240 */
241 while (1) {
242 unsigned int tmp;
243
b2cff0db 244 active_domains = 0;
38cff0b1 245
d2dc94bc 246 for_each_fw_domain(domain, dev_priv, tmp) {
a57a4a67 247 if (hrtimer_cancel(&domain->timer) == 0)
b2cff0db 248 continue;
38cff0b1 249
a57a4a67 250 intel_uncore_fw_release_timer(&domain->timer);
b2cff0db 251 }
aec347ab 252
b2cff0db 253 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
b2ec142c 254
d2dc94bc 255 for_each_fw_domain(domain, dev_priv, tmp) {
a57a4a67 256 if (hrtimer_active(&domain->timer))
33c582c1 257 active_domains |= domain->mask;
b2cff0db 258 }
3123fcaf 259
260 if (active_domains == 0)
261 break;
aec347ab 262
263 if (--retry_count == 0) {
264 DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
265 break;
266 }
0294ae7b 267
268 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
269 cond_resched();
270 }
0294ae7b 271
272 WARN_ON(active_domains);
273
003342a5 274 fw = dev_priv->uncore.fw_domains_active;
275 if (fw)
276 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
ef46e0d2 277
cb3600db 278 fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
38cff0b1 279
0294ae7b 280 if (restore) { /* If reset with a user forcewake, try to restore */
281 if (fw)
282 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
283
dc97997a 284 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
0294ae7b 285 dev_priv->uncore.fifo_count =
c32e3788 286 fifo_free_entries(dev_priv);
287 }
288
b2cff0db 289 if (!restore)
59bad947 290 assert_forcewakes_inactive(dev_priv);
b2cff0db 291
0294ae7b 292 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
293}
294
295static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
296{
297 const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
298 const unsigned int sets[4] = { 1, 1, 2, 2 };
299 const u32 cap = dev_priv->edram_cap;
300
301 return EDRAM_NUM_BANKS(cap) *
302 ways[EDRAM_WAYS_IDX(cap)] *
303 sets[EDRAM_SETS_IDX(cap)] *
304 1024 * 1024;
305}
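/*
 * Illustrative arithmetic only (hypothetical capability value): if the
 * eDRAM capability register decoded to 8 banks, a ways index selecting
 * 8 ways and a sets index selecting 2 sets, the size computed above would
 * be 8 * 8 * 2 * 1024 * 1024 bytes, i.e. 128MB.
 */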
306
3accaf7e 307u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
907b28c5 308{
309 if (!HAS_EDRAM(dev_priv))
310 return 0;
311
312 /* The needed capability bits for size calculation
313 * are not there with pre gen9 so return 128MB always.
3accaf7e 314 */
315 if (INTEL_GEN(dev_priv) < 9)
316 return 128 * 1024 * 1024;
3accaf7e 317
c02e85a0 318 return gen9_edram_size(dev_priv);
3accaf7e 319}
907b28c5 320
321static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
322{
323 if (IS_HASWELL(dev_priv) ||
324 IS_BROADWELL(dev_priv) ||
325 INTEL_GEN(dev_priv) >= 9) {
326 dev_priv->edram_cap = __raw_i915_read32(dev_priv,
327 HSW_EDRAM_CAP);
328
329 /* NB: We can't write IDICR yet because we do not have gt funcs
18ce3994 330 * set up */
331 } else {
332 dev_priv->edram_cap = 0;
18ce3994 333 }
334
335 if (HAS_EDRAM(dev_priv))
336 DRM_INFO("Found %lluMB of eDRAM\n",
337 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
338}
339
8a47eb19 340static bool
8ac3e1bb 341fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
342{
343 u32 dbg;
344
345 dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
346 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
347 return false;
348
349 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
350
351 return true;
352}
353
354static bool
355vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
356{
357 u32 cer;
358
359 cer = __raw_i915_read32(dev_priv, CLAIM_ER);
360 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
361 return false;
362
363 __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
364
365 return true;
366}
367
368static bool
369gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
370{
371 u32 fifodbg;
372
373 fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
374
375 if (unlikely(fifodbg)) {
376 DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
377 __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
378 }
379
380 return fifodbg;
381}
382
383static bool
384check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
385{
386 bool ret = false;
387
8ac3e1bb 388 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
a338908c 389 ret |= fpga_check_for_unclaimed_mmio(dev_priv);
390
391 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
392 ret |= vlv_check_for_unclaimed_mmio(dev_priv);
393
394 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
395 ret |= gen6_check_for_fifo_debug(dev_priv);
8ac3e1bb 396
a338908c 397 return ret;
398}
399
dc97997a 400static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
401 bool restore_forcewake)
402{
403 struct intel_device_info *info = mkwrite_device_info(dev_priv);
404
405 /* clear out unclaimed reg detection bit */
406 if (check_for_unclaimed_mmio(dev_priv))
407 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
907b28c5 408
a04f90a3 409 /* WaDisableShadowRegForCpd:chv */
dc97997a 410 if (IS_CHERRYVIEW(dev_priv)) {
411 __raw_i915_write32(dev_priv, GTFIFOCTL,
412 __raw_i915_read32(dev_priv, GTFIFOCTL) |
413 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
414 GT_FIFO_CTL_RC6_POLICY_STALL);
415 }
416
a3f79ca6 417 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
418 info->has_decoupled_mmio = false;
419
dc97997a 420 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
421}
422
68f60946 423void intel_uncore_suspend(struct drm_i915_private *dev_priv)
ed493883 424{
425 iosf_mbi_unregister_pmic_bus_access_notifier(
426 &dev_priv->uncore.pmic_bus_access_nb);
427 intel_uncore_forcewake_reset(dev_priv, false);
428}
429
430void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
431{
432 __intel_uncore_early_sanitize(dev_priv, true);
433 iosf_mbi_register_pmic_bus_access_notifier(
434 &dev_priv->uncore.pmic_bus_access_nb);
dc97997a 435 i915_check_and_clear_faults(dev_priv);
436}
437
dc97997a 438void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
521198a2 439{
dc97997a 440 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
274008e8 441
907b28c5 442 /* BIOS often leaves RC6 enabled, but disable it for hw init */
54b4f68f 443 intel_sanitize_gt_powersave(dev_priv);
444}
445
446static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
447 enum forcewake_domains fw_domains)
448{
449 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 450 unsigned int tmp;
a6111f7b 451
452 fw_domains &= dev_priv->uncore.fw_domains;
453
d2dc94bc 454 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
a6111f7b 455 if (domain->wake_count++)
33c582c1 456 fw_domains &= ~domain->mask;
a6111f7b 457
b8473050 458 if (fw_domains)
459 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
460}
461
462/**
463 * intel_uncore_forcewake_get - grab forcewake domain references
464 * @dev_priv: i915 device instance
465 * @fw_domains: forcewake domains to get reference on
466 *
467 * This function can be used to get GT's forcewake domain references.
468 * Normal register access will handle the forcewake domains automatically.
469 * However, if some sequence requires the GT to not power down a particular
470 * forcewake domain, this function should be called at the beginning of the
471 * sequence, and the reference should subsequently be dropped by a symmetric
472 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
473 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
907b28c5 474 */
59bad947 475void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
48c1026a 476 enum forcewake_domains fw_domains)
477{
478 unsigned long irqflags;
479
480 if (!dev_priv->uncore.funcs.force_wake_get)
481 return;
482
c9b8846a 483 assert_rpm_wakelock_held(dev_priv);
c8c8fb33 484
6daccb0b 485 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
a6111f7b 486 __intel_uncore_forcewake_get(dev_priv, fw_domains);
487 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
488}
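/*
 * Illustrative usage sketch (not part of the original file): a sequence
 * that must keep the render well awake across several raw accesses would
 * bracket them with a symmetric get/put pair, e.g.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... multiple I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * Ordinary I915_READ()/I915_WRITE() users do not need this; the mmio
 * accessors below take the required domains automatically.
 */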
489
59bad947 490/**
a6111f7b 491 * intel_uncore_forcewake_get__locked - grab forcewake domain references
59bad947 492 * @dev_priv: i915 device instance
a6111f7b 493 * @fw_domains: forcewake domains to get reference on
59bad947 494 *
495 * See intel_uncore_forcewake_get(). This variant places the onus
496 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 497 */
498void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
499 enum forcewake_domains fw_domains)
500{
67520415 501 lockdep_assert_held(&dev_priv->uncore.lock);
502
503 if (!dev_priv->uncore.funcs.force_wake_get)
504 return;
505
506 __intel_uncore_forcewake_get(dev_priv, fw_domains);
507}
508
509static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
510 enum forcewake_domains fw_domains)
907b28c5 511{
b2cff0db 512 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 513 unsigned int tmp;
907b28c5 514
515 fw_domains &= dev_priv->uncore.fw_domains;
516
d2dc94bc 517 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
518 if (WARN_ON(domain->wake_count == 0))
519 continue;
520
521 if (--domain->wake_count)
522 continue;
523
05a2fb15 524 fw_domain_arm_timer(domain);
aec347ab 525 }
a6111f7b 526}
dc9fb09c 527
528/**
529 * intel_uncore_forcewake_put - release a forcewake domain reference
530 * @dev_priv: i915 device instance
531 * @fw_domains: forcewake domains to put references
532 *
533 * This function drops the device-level forcewakes for specified
534 * domains obtained by intel_uncore_forcewake_get().
535 */
536void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
537 enum forcewake_domains fw_domains)
538{
539 unsigned long irqflags;
540
541 if (!dev_priv->uncore.funcs.force_wake_put)
542 return;
543
544 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
545 __intel_uncore_forcewake_put(dev_priv, fw_domains);
546 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
547}
548
549/**
550 * intel_uncore_forcewake_put__locked - release forcewake domain references
551 * @dev_priv: i915 device instance
552 * @fw_domains: forcewake domains to put references
553 *
554 * See intel_uncore_forcewake_put(). This variant places the onus
555 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
556 */
557void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
558 enum forcewake_domains fw_domains)
559{
67520415 560 lockdep_assert_held(&dev_priv->uncore.lock);
561
562 if (!dev_priv->uncore.funcs.force_wake_put)
563 return;
564
565 __intel_uncore_forcewake_put(dev_priv, fw_domains);
566}
567
59bad947 568void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
569{
570 if (!dev_priv->uncore.funcs.force_wake_get)
571 return;
572
003342a5 573 WARN_ON(dev_priv->uncore.fw_domains_active);
574}
575
907b28c5 576/* We give fast paths for the really cool registers */
40181697 577#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
907b28c5 578
579#define __gen6_reg_read_fw_domains(offset) \
580({ \
581 enum forcewake_domains __fwd; \
582 if (NEEDS_FORCE_WAKE(offset)) \
583 __fwd = FORCEWAKE_RENDER; \
584 else \
585 __fwd = 0; \
586 __fwd; \
587})
588
9480dbf0 589static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 590{
591 if (offset < entry->start)
592 return -1;
593 else if (offset > entry->end)
594 return 1;
595 else
596 return 0;
597}
598
599/* Copied and "macroized" from lib/bsearch.c */
600#define BSEARCH(key, base, num, cmp) ({ \
601 unsigned int start__ = 0, end__ = (num); \
602 typeof(base) result__ = NULL; \
603 while (start__ < end__) { \
604 unsigned int mid__ = start__ + (end__ - start__) / 2; \
605 int ret__ = (cmp)((key), (base) + mid__); \
606 if (ret__ < 0) { \
607 end__ = mid__; \
608 } else if (ret__ > 0) { \
609 start__ = mid__ + 1; \
610 } else { \
611 result__ = (base) + mid__; \
612 break; \
613 } \
614 } \
615 result__; \
616})
617
9fc1117c 618static enum forcewake_domains
15157970 619find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
9fc1117c 620{
9480dbf0 621 const struct intel_forcewake_range *entry;
9fc1117c 622
623 entry = BSEARCH(offset,
624 dev_priv->uncore.fw_domains_table,
625 dev_priv->uncore.fw_domains_table_entries,
91e630b9 626 fw_range_cmp);
38fb6a40 627
99191427
JL
628 if (!entry)
629 return 0;
630
631 WARN(entry->domains & ~dev_priv->uncore.fw_domains,
632 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
633 entry->domains & ~dev_priv->uncore.fw_domains, offset);
634
635 return entry->domains;
636}
637
638#define GEN_FW_RANGE(s, e, d) \
639 { .start = (s), .end = (e), .domains = (d) }
1938e59a 640
641#define HAS_FWTABLE(dev_priv) \
642 (IS_GEN9(dev_priv) || \
643 IS_CHERRYVIEW(dev_priv) || \
644 IS_VALLEYVIEW(dev_priv))
645
b0081239 646/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
647static const struct intel_forcewake_range __vlv_fw_ranges[] = {
648 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
649 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
650 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
651 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
652 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
b0081239 653 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
654 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
655};
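/*
 * Example lookup (illustrative): with the table above, find_fw_domain()
 * for offset 0x12080 hits the 0x12000-0x13fff entry and returns
 * FORCEWAKE_MEDIA, while an offset outside every range (e.g. 0x1f000)
 * returns 0 and needs no forcewake.
 */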
1938e59a 656
895833bd 657#define __fwtable_reg_read_fw_domains(offset) \
658({ \
659 enum forcewake_domains __fwd = 0; \
0dd356bb 660 if (NEEDS_FORCE_WAKE((offset))) \
15157970 661 __fwd = find_fw_domain(dev_priv, offset); \
662 __fwd; \
663})
664
47188574 665/* *Must* be sorted by offset! See intel_shadow_table_check(). */
6863b76c 666static const i915_reg_t gen8_shadowed_regs[] = {
667 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
668 GEN6_RPNSWREQ, /* 0xA008 */
669 GEN6_RC_VIDEO_FREQ, /* 0xA00C */
670 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
671 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
672 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
673 /* TODO: Other registers are not yet used */
674};
675
9480dbf0 676static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
5a659383 677{
9480dbf0 678 u32 offset = i915_mmio_reg_offset(*reg);
5a659383 679
9480dbf0 680 if (key < offset)
5a659383 681 return -1;
9480dbf0 682 else if (key > offset)
683 return 1;
684 else
685 return 0;
686}
687
688static bool is_gen8_shadowed(u32 offset)
689{
9480dbf0 690 const i915_reg_t *regs = gen8_shadowed_regs;
5a659383 691
692 return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
693 mmio_reg_cmp);
694}
695
696#define __gen8_reg_write_fw_domains(offset) \
697({ \
698 enum forcewake_domains __fwd; \
699 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
700 __fwd = FORCEWAKE_RENDER; \
701 else \
702 __fwd = 0; \
703 __fwd; \
704})
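/*
 * Illustrative example: a write to RING_TAIL(RENDER_RING_BASE) (in the
 * 0x2000 range and listed in gen8_shadowed_regs above) resolves to 0 here,
 * so no forcewake is taken for it, while a non-shadowed register below
 * 0x40000 resolves to FORCEWAKE_RENDER.
 */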
705
b0081239 706/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
707static const struct intel_forcewake_range __chv_fw_ranges[] = {
708 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 709 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 710 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 711 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 712 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 713 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 714 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
715 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
716 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 717 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
718 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
719 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
720 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
721 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
722 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
723 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 724};
38fb6a40 725
22d48c55 726#define __fwtable_reg_write_fw_domains(offset) \
727({ \
728 enum forcewake_domains __fwd = 0; \
0dd356bb 729 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
15157970 730 __fwd = find_fw_domain(dev_priv, offset); \
731 __fwd; \
732})
733
b0081239 734/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c 735static const struct intel_forcewake_range __gen9_fw_ranges[] = {
0dd356bb 736 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
9fc1117c
TU
737 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
738 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
0dd356bb 739 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
9fc1117c 740 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
0dd356bb 741 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
9fc1117c 742 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
0dd356bb 743 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
b0081239 744 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 745 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
0dd356bb 746 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
9fc1117c 747 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
0dd356bb 748 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
b0081239 749 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
0dd356bb 750 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
9fc1117c 751 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
0dd356bb 752 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
9fc1117c 753 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
0dd356bb 754 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
b0081239 755 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
78424c92 756 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
9fc1117c 757 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
0dd356bb 758 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
b0081239 759 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
0dd356bb 760 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
9fc1117c 761 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
0dd356bb 762 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
9fc1117c 763 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
0dd356bb 764 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
b0081239 765 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
0dd356bb 766 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
767 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
768};
6863b76c 769
770static void
771ilk_dummy_write(struct drm_i915_private *dev_priv)
772{
773 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
774 * the chip from rc6 before touching it for real. MI_MODE is masked,
775 * hence harmless to write 0 into. */
6af5d92f 776 __raw_i915_write32(dev_priv, MI_MODE, 0);
777}
778
779static void
780__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
781 const i915_reg_t reg,
782 const bool read,
783 const bool before)
907b28c5 784{
785 if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
786 "Unclaimed %s register 0x%x\n",
787 read ? "read from" : "write to",
4bd0a25d 788 i915_mmio_reg_offset(reg)))
48572edd 789 i915.mmio_debug--; /* Only report the first N failures */
790}
791
792static inline void
793unclaimed_reg_debug(struct drm_i915_private *dev_priv,
794 const i915_reg_t reg,
795 const bool read,
796 const bool before)
797{
798 if (likely(!i915.mmio_debug))
799 return;
800
801 __unclaimed_reg_debug(dev_priv, reg, read, before);
802}
803
804enum decoupled_power_domain {
805 GEN9_DECOUPLED_PD_BLITTER = 0,
806 GEN9_DECOUPLED_PD_RENDER,
807 GEN9_DECOUPLED_PD_MEDIA,
808 GEN9_DECOUPLED_PD_ALL
809};
810
811enum decoupled_ops {
812 GEN9_DECOUPLED_OP_WRITE = 0,
813 GEN9_DECOUPLED_OP_READ
814};
815
816static const enum decoupled_power_domain fw2dpd_domain[] = {
817 GEN9_DECOUPLED_PD_RENDER,
818 GEN9_DECOUPLED_PD_BLITTER,
819 GEN9_DECOUPLED_PD_ALL,
820 GEN9_DECOUPLED_PD_MEDIA,
821 GEN9_DECOUPLED_PD_ALL,
822 GEN9_DECOUPLED_PD_ALL,
823 GEN9_DECOUPLED_PD_ALL
824};
825
826/*
827 * Decoupled MMIO access for only 1 DWORD
828 */
829static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
830 u32 reg,
831 enum forcewake_domains fw_domain,
832 enum decoupled_ops operation)
833{
834 enum decoupled_power_domain dp_domain;
835 u32 ctrl_reg_data = 0;
836
837 dp_domain = fw2dpd_domain[fw_domain - 1];
838
839 ctrl_reg_data |= reg;
840 ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
841 ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
842 ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
843 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
844
845 if (wait_for_atomic((__raw_i915_read32(dev_priv,
846 GEN9_DECOUPLED_REG0_DW1) &
847 GEN9_DECOUPLED_DW1_GO) == 0,
848 FORCEWAKE_ACK_TIMEOUT_MS))
849 DRM_ERROR("Decoupled MMIO wait timed out\n");
850}
851
852static inline u32
853__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
854 u32 reg,
855 enum forcewake_domains fw_domain)
856{
857 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
858 GEN9_DECOUPLED_OP_READ);
859
860 return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
861}
862
863static inline void
864__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
865 u32 reg, u32 data,
866 enum forcewake_domains fw_domain)
867{
868
869 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
870
871 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
872 GEN9_DECOUPLED_OP_WRITE);
873}
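/*
 * Illustrative flow (hypothetical offset): a decoupled read of a render
 * register at 0x8000 programs GEN9_DECOUPLED_REG0_DW1 with the offset,
 * GEN9_DECOUPLED_OP_READ << GEN9_DECOUPLED_OP_SHIFT,
 * GEN9_DECOUPLED_PD_RENDER << GEN9_DECOUPLED_PD_SHIFT and the GO bit,
 * polls until GO clears, then picks the value up from
 * GEN9_DECOUPLED_REG0_DW0, exactly as __gen9_decoupled_mmio_read32() does.
 */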
874
875
51f67885 876#define GEN2_READ_HEADER(x) \
5d738795 877 u##x val = 0; \
da5827c3 878 assert_rpm_wakelock_held(dev_priv);
5d738795 879
51f67885 880#define GEN2_READ_FOOTER \
881 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
882 return val
883
51f67885 884#define __gen2_read(x) \
0b274481 885static u##x \
f0f59a00 886gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 887 GEN2_READ_HEADER(x); \
3967018e 888 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 889 GEN2_READ_FOOTER; \
890}
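/*
 * For orientation only, a rough sketch of what __gen2_read(32) expands to
 * once GEN2_READ_HEADER/GEN2_READ_FOOTER are substituted:
 *
 *	static u32
 *	gen2_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */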
891
892#define __gen5_read(x) \
893static u##x \
f0f59a00 894gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 895 GEN2_READ_HEADER(x); \
896 ilk_dummy_write(dev_priv); \
897 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 898 GEN2_READ_FOOTER; \
899}
900
901__gen5_read(8)
902__gen5_read(16)
903__gen5_read(32)
904__gen5_read(64)
905__gen2_read(8)
906__gen2_read(16)
907__gen2_read(32)
908__gen2_read(64)
909
910#undef __gen5_read
911#undef __gen2_read
912
913#undef GEN2_READ_FOOTER
914#undef GEN2_READ_HEADER
915
916#define GEN6_READ_HEADER(x) \
f0f59a00 917 u32 offset = i915_mmio_reg_offset(reg); \
918 unsigned long irqflags; \
919 u##x val = 0; \
da5827c3 920 assert_rpm_wakelock_held(dev_priv); \
921 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
922 unclaimed_reg_debug(dev_priv, reg, true, true)
923
924#define GEN6_READ_FOOTER \
9c053501 925 unclaimed_reg_debug(dev_priv, reg, true, false); \
926 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
927 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
928 return val
929
930static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
931 enum forcewake_domains fw_domains)
932{
933 struct intel_uncore_forcewake_domain *domain;
934 unsigned int tmp;
935
936 GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
b2cff0db 937
d2dc94bc 938 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
939 fw_domain_arm_timer(domain);
940
941 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
942}
943
944static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
945 enum forcewake_domains fw_domains)
946{
947 if (WARN_ON(!fw_domains))
948 return;
949
950 /* Turn on all requested but inactive supported forcewake domains. */
951 fw_domains &= dev_priv->uncore.fw_domains;
952 fw_domains &= ~dev_priv->uncore.fw_domains_active;
b2cff0db 953
954 if (fw_domains)
955 ___force_wake_auto(dev_priv, fw_domains);
956}
957
ccfceda2 958#define __gen_read(func, x) \
3967018e 959static u##x \
ccfceda2 960func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6863b76c 961 enum forcewake_domains fw_engine; \
51f67885 962 GEN6_READ_HEADER(x); \
ccfceda2 963 fw_engine = __##func##_reg_read_fw_domains(offset); \
6a42d0f4 964 if (fw_engine) \
b208ba8e 965 __force_wake_auto(dev_priv, fw_engine); \
6fe72865 966 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 967 GEN6_READ_FOOTER; \
940aece4 968}
969#define __gen6_read(x) __gen_read(gen6, x)
970#define __fwtable_read(x) __gen_read(fwtable, x)
940aece4 971
972#define __gen9_decoupled_read(x) \
973static u##x \
974gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
975 i915_reg_t reg, bool trace) { \
976 enum forcewake_domains fw_engine; \
977 GEN6_READ_HEADER(x); \
978 fw_engine = __fwtable_reg_read_fw_domains(offset); \
979 if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
980 unsigned i; \
981 u32 *ptr_data = (u32 *) &val; \
982 for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
983 *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
984 offset, \
985 fw_engine); \
986 } else { \
987 val = __raw_i915_read##x(dev_priv, reg); \
988 } \
989 GEN6_READ_FOOTER; \
990}
991
992__gen9_decoupled_read(32)
993__gen9_decoupled_read(64)
994__fwtable_read(8)
995__fwtable_read(16)
996__fwtable_read(32)
997__fwtable_read(64)
998__gen6_read(8)
999__gen6_read(16)
1000__gen6_read(32)
1001__gen6_read(64)
3967018e 1002
6044c4a3 1003#undef __fwtable_read
3967018e 1004#undef __gen6_read
1005#undef GEN6_READ_FOOTER
1006#undef GEN6_READ_HEADER
5d738795 1007
51f67885 1008#define GEN2_WRITE_HEADER \
5d738795 1009 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1010 assert_rpm_wakelock_held(dev_priv); \
907b28c5 1011
51f67885 1012#define GEN2_WRITE_FOOTER
0d965301 1013
51f67885 1014#define __gen2_write(x) \
0b274481 1015static void \
f0f59a00 1016gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1017 GEN2_WRITE_HEADER; \
4032ef43 1018 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1019 GEN2_WRITE_FOOTER; \
1020}
1021
1022#define __gen5_write(x) \
1023static void \
f0f59a00 1024gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1025 GEN2_WRITE_HEADER; \
1026 ilk_dummy_write(dev_priv); \
1027 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1028 GEN2_WRITE_FOOTER; \
1029}
1030
1031__gen5_write(8)
1032__gen5_write(16)
1033__gen5_write(32)
1034__gen2_write(8)
1035__gen2_write(16)
1036__gen2_write(32)
1037
1038#undef __gen5_write
1039#undef __gen2_write
1040
1041#undef GEN2_WRITE_FOOTER
1042#undef GEN2_WRITE_HEADER
1043
1044#define GEN6_WRITE_HEADER \
f0f59a00 1045 u32 offset = i915_mmio_reg_offset(reg); \
1046 unsigned long irqflags; \
1047 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1048 assert_rpm_wakelock_held(dev_priv); \
1049 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1050 unclaimed_reg_debug(dev_priv, reg, false, true)
1051
1052#define GEN6_WRITE_FOOTER \
9c053501 1053 unclaimed_reg_debug(dev_priv, reg, false, false); \
1054 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1055
1056#define __gen6_write(x) \
1057static void \
f0f59a00 1058gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1059 GEN6_WRITE_HEADER; \
1060 if (NEEDS_FORCE_WAKE(offset)) \
1061 __gen6_gt_wait_for_fifo(dev_priv); \
4032ef43 1062 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1063 GEN6_WRITE_FOOTER; \
1064}
1065
ccfceda2 1066#define __gen_write(func, x) \
ab2aa47e 1067static void \
ccfceda2 1068func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 1069 enum forcewake_domains fw_engine; \
51f67885 1070 GEN6_WRITE_HEADER; \
ccfceda2 1071 fw_engine = __##func##_reg_write_fw_domains(offset); \
6a42d0f4 1072 if (fw_engine) \
b208ba8e 1073 __force_wake_auto(dev_priv, fw_engine); \
1938e59a 1074 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1075 GEN6_WRITE_FOOTER; \
1938e59a 1076}
1077#define __gen8_write(x) __gen_write(gen8, x)
1078#define __fwtable_write(x) __gen_write(fwtable, x)
1938e59a 1079
1080#define __gen9_decoupled_write(x) \
1081static void \
1082gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
1083 i915_reg_t reg, u##x val, \
1084 bool trace) { \
1085 enum forcewake_domains fw_engine; \
1086 GEN6_WRITE_HEADER; \
1087 fw_engine = __fwtable_reg_write_fw_domains(offset); \
1088 if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
1089 __gen9_decoupled_mmio_write(dev_priv, \
1090 offset, \
1091 val, \
1092 fw_engine); \
1093 else \
1094 __raw_i915_write##x(dev_priv, reg, val); \
1095 GEN6_WRITE_FOOTER; \
1096}
1097
1098__gen9_decoupled_write(32)
1099__fwtable_write(8)
1100__fwtable_write(16)
1101__fwtable_write(32)
1102__gen8_write(8)
1103__gen8_write(16)
1104__gen8_write(32)
1105__gen6_write(8)
1106__gen6_write(16)
1107__gen6_write(32)
4032ef43 1108
22d48c55 1109#undef __fwtable_write
ab2aa47e 1110#undef __gen8_write
4032ef43 1111#undef __gen6_write
1112#undef GEN6_WRITE_FOOTER
1113#undef GEN6_WRITE_HEADER
907b28c5 1114
0757ac8f 1115#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
43d942a7 1116do { \
1117 (i915)->uncore.funcs.mmio_writeb = x##_write8; \
1118 (i915)->uncore.funcs.mmio_writew = x##_write16; \
1119 (i915)->uncore.funcs.mmio_writel = x##_write32; \
1120} while (0)
1121
0757ac8f 1122#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
43d942a7 1123do { \
1124 (i915)->uncore.funcs.mmio_readb = x##_read8; \
1125 (i915)->uncore.funcs.mmio_readw = x##_read16; \
1126 (i915)->uncore.funcs.mmio_readl = x##_read32; \
1127 (i915)->uncore.funcs.mmio_readq = x##_read64; \
1128} while (0)
1129
1130
1131static void fw_domain_init(struct drm_i915_private *dev_priv,
48c1026a 1132 enum forcewake_domain_id domain_id,
1133 i915_reg_t reg_set,
1134 i915_reg_t reg_ack)
1135{
1136 struct intel_uncore_forcewake_domain *d;
1137
1138 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1139 return;
1140
1141 d = &dev_priv->uncore.fw_domain[domain_id];
1142
1143 WARN_ON(d->wake_count);
1144
1145 WARN_ON(!i915_mmio_reg_valid(reg_set));
1146 WARN_ON(!i915_mmio_reg_valid(reg_ack));
1147
1148 d->wake_count = 0;
1149 d->reg_set = reg_set;
1150 d->reg_ack = reg_ack;
1151
1152 d->id = domain_id;
1153
1154 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1155 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1156 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1157
d2dc94bc 1158 d->mask = BIT(domain_id);
33c582c1 1159
1160 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1161 d->timer.function = intel_uncore_fw_release_timer;
05a2fb15 1162
6e3955a5 1163 dev_priv->uncore.fw_domains |= BIT(domain_id);
f9b3927a 1164
577ac4bd 1165 fw_domain_reset(dev_priv, d);
1166}
1167
dc97997a 1168static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
0b274481 1169{
e3b1895f 1170 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1171 return;
1172
1173 if (IS_GEN6(dev_priv)) {
1174 dev_priv->uncore.fw_reset = 0;
1175 dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1176 dev_priv->uncore.fw_clear = 0;
1177 } else {
1178 /* WaRsClearFWBitsAtReset:bdw,skl */
1179 dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1180 dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1181 dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1182 }
1183
dc97997a 1184 if (IS_GEN9(dev_priv)) {
1185 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1186 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1187 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1188 FORCEWAKE_RENDER_GEN9,
1189 FORCEWAKE_ACK_RENDER_GEN9);
1190 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1191 FORCEWAKE_BLITTER_GEN9,
1192 FORCEWAKE_ACK_BLITTER_GEN9);
1193 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1194 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
dc97997a 1195 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
05a2fb15 1196 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
a338908c 1197 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1198 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1199 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1200 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1201 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
dc97997a 1202 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1203 dev_priv->uncore.funcs.force_wake_get =
1204 fw_domains_get_with_thread_status;
a338908c 1205 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1206 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1207 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
dc97997a 1208 } else if (IS_IVYBRIDGE(dev_priv)) {
1209 u32 ecobus;
1210
1211 /* IVB configs may use multi-threaded forcewake */
1212
1213 /* A small trick here - if the bios hasn't configured
1214 * MT forcewake, and if the device is in RC6, then
1215 * force_wake_mt_get will not wake the device and the
1216 * ECOBUS read will return zero. Which will be
1217 * (correctly) interpreted by the test below as MT
1218 * forcewake being disabled.
1219 */
1220 dev_priv->uncore.funcs.force_wake_get =
1221 fw_domains_get_with_thread_status;
a338908c 1222 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
05a2fb15 1223
1224 /* We need to init first for ECOBUS access and then
1225 * determine later if we want to reinit, in case MT access is
1226 * not working. At this stage we don't know which flavour this
1227 * ivb is, so it is better to also reset the gen6 fw registers
1228 * before the ecobus check.
f9b3927a 1229 */
1230
1231 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1232 __raw_posting_read(dev_priv, ECOBUS);
1233
1234 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1235 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
f9b3927a 1236
556ab7a6 1237 spin_lock_irq(&dev_priv->uncore.lock);
bd527504 1238 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
0b274481 1239 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
a338908c 1240 fw_domains_put(dev_priv, FORCEWAKE_RENDER);
556ab7a6 1241 spin_unlock_irq(&dev_priv->uncore.lock);
0b274481 1242
05a2fb15 1243 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1244 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1245 DRM_INFO("when using vblank-synced partial screen updates.\n");
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1247 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1248 }
dc97997a 1249 } else if (IS_GEN6(dev_priv)) {
0b274481 1250 dev_priv->uncore.funcs.force_wake_get =
05a2fb15 1251 fw_domains_get_with_thread_status;
a338908c 1252 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1253 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1254 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1255 }
1256
1257 /* All future platforms are expected to require complex power gating */
1258 WARN_ON(dev_priv->uncore.fw_domains == 0);
1259}
1260
1261#define ASSIGN_FW_DOMAINS_TABLE(d) \
1262{ \
1263 dev_priv->uncore.fw_domains_table = \
1264 (struct intel_forcewake_range *)(d); \
1265 dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1266}
1267
1268static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1269 unsigned long action, void *data)
1270{
1271 struct drm_i915_private *dev_priv = container_of(nb,
1272 struct drm_i915_private, uncore.pmic_bus_access_nb);
1273
1274 switch (action) {
1275 case MBI_PMIC_BUS_ACCESS_BEGIN:
1276 /*
1277 * forcewake all now to make sure that we don't need to do a
1278 * forcewake later which on systems where this notifier gets
1279 * called requires the punit to access to the shared pmic i2c
1280 * bus, which will be busy after this notification, leading to:
1281 * "render: timed out waiting for forcewake ack request."
1282 * errors.
1283 */
1284 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1285 break;
1286 case MBI_PMIC_BUS_ACCESS_END:
1287 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1288 break;
1289 }
1290
1291 return NOTIFY_OK;
1292}
1293
dc97997a 1294void intel_uncore_init(struct drm_i915_private *dev_priv)
f9b3927a 1295{
dc97997a 1296 i915_check_vgpu(dev_priv);
cf9d2890 1297
3accaf7e 1298 intel_uncore_edram_detect(dev_priv);
1299 intel_uncore_fw_domains_init(dev_priv);
1300 __intel_uncore_early_sanitize(dev_priv, false);
0b274481 1301
75714940 1302 dev_priv->uncore.unclaimed_mmio_check = 1;
1303 dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1304 i915_pmic_bus_access_notifier;
75714940 1305
e3b1895f 1306 if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1307 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
1308 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
e3b1895f 1309 } else if (IS_GEN5(dev_priv)) {
1310 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
1311 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
e3b1895f 1312 } else if (IS_GEN(dev_priv, 6, 7)) {
0757ac8f 1313 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
1314
1315 if (IS_VALLEYVIEW(dev_priv)) {
1316 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
0757ac8f 1317 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
e3b1895f 1318 } else {
0757ac8f 1319 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
85ee17eb 1320 }
e3b1895f 1321 } else if (IS_GEN8(dev_priv)) {
dc97997a 1322 if (IS_CHERRYVIEW(dev_priv)) {
15157970 1323 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1324 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1325 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1326
1327 } else {
1328 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
1329 ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1938e59a 1330 }
1331 } else {
1332 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1333 ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1334 ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1335 if (HAS_DECOUPLED_MMIO(dev_priv)) {
1336 dev_priv->uncore.funcs.mmio_readl =
1337 gen9_decoupled_read32;
1338 dev_priv->uncore.funcs.mmio_readq =
1339 gen9_decoupled_read64;
1340 dev_priv->uncore.funcs.mmio_writel =
1341 gen9_decoupled_write32;
940aece4 1342 }
3967018e 1343 }
ed493883 1344
1345 iosf_mbi_register_pmic_bus_access_notifier(
1346 &dev_priv->uncore.pmic_bus_access_nb);
1347
dc97997a 1348 i915_check_and_clear_faults(dev_priv);
1349}
1350
dc97997a 1351void intel_uncore_fini(struct drm_i915_private *dev_priv)
0b274481 1352{
1353 iosf_mbi_unregister_pmic_bus_access_notifier(
1354 &dev_priv->uncore.pmic_bus_access_nb);
1355
0b274481 1356 /* Paranoia: make sure we have disabled everything before we exit. */
1357 intel_uncore_sanitize(dev_priv);
1358 intel_uncore_forcewake_reset(dev_priv, false);
1359}
1360
ae5702d2 1361#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
af76ae44 1362
907b28c5 1363static const struct register_whitelist {
f0f59a00 1364 i915_reg_t offset_ldw, offset_udw;
907b28c5 1365 uint32_t size;
1366 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1367 uint32_t gen_bitmask;
907b28c5 1368} whitelist[] = {
1369 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1370 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1371 .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1372};
1373
1374int i915_reg_read_ioctl(struct drm_device *dev,
1375 void *data, struct drm_file *file)
1376{
fac5e23e 1377 struct drm_i915_private *dev_priv = to_i915(dev);
1378 struct drm_i915_reg_read *reg = data;
1379 struct register_whitelist const *entry = whitelist;
648a9bc5 1380 unsigned size;
f0f59a00 1381 i915_reg_t offset_ldw, offset_udw;
cf67c70f 1382 int i, ret = 0;
1383
1384 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
f0f59a00 1385 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
66478475 1386 (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
1387 break;
1388 }
1389
1390 if (i == ARRAY_SIZE(whitelist))
1391 return -EINVAL;
1392
1393 /* We use the low bits to encode extra flags as the register should
1394 * be naturally aligned (and those that are not so aligned merely
1395 * limit the available flags for that register).
1396 */
1397 offset_ldw = entry->offset_ldw;
1398 offset_udw = entry->offset_udw;
648a9bc5 1399 size = entry->size;
f0f59a00 1400 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
648a9bc5 1401
1402 intel_runtime_pm_get(dev_priv);
1403
1404 switch (size) {
1405 case 8 | 1:
8697600b 1406 reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
648a9bc5 1407 break;
907b28c5 1408 case 8:
8697600b 1409 reg->val = I915_READ64(offset_ldw);
1410 break;
1411 case 4:
8697600b 1412 reg->val = I915_READ(offset_ldw);
1413 break;
1414 case 2:
8697600b 1415 reg->val = I915_READ16(offset_ldw);
1416 break;
1417 case 1:
8697600b 1418 reg->val = I915_READ8(offset_ldw);
1419 break;
1420 default:
1421 ret = -EINVAL;
1422 goto out;
1423 }
1424
1425out:
1426 intel_runtime_pm_put(dev_priv);
1427 return ret;
1428}
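/*
 * Illustrative userspace usage (sketch, not from this file): to read the
 * whitelisted render timestamp through this ioctl, userspace fills a
 * struct drm_i915_reg_read with .offset set to the RING_TIMESTAMP offset
 * and issues DRM_IOCTL_I915_REG_READ; the value comes back in .val.
 * Setting the low offset bit selects the 8 | 1 case above, i.e. the
 * I915_READ64_2x32() flavour of the 64-bit read.
 */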
1429
1430static void gen3_stop_rings(struct drm_i915_private *dev_priv)
1431{
1432 struct intel_engine_cs *engine;
1433 enum intel_engine_id id;
1434
1435 for_each_engine(engine, dev_priv, id) {
1436 const u32 base = engine->mmio_base;
1437 const i915_reg_t mode = RING_MI_MODE(base);
1438
1439 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
1440 if (intel_wait_for_register_fw(dev_priv,
1441 mode,
1442 MODE_IDLE,
1443 MODE_IDLE,
1444 500))
1445 DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
1446 engine->name);
1447
1448 I915_WRITE_FW(RING_CTL(base), 0);
1449 I915_WRITE_FW(RING_HEAD(base), 0);
1450 I915_WRITE_FW(RING_TAIL(base), 0);
1451
1452 /* Check acts as a post */
1453 if (I915_READ_FW(RING_HEAD(base)) != 0)
1454 DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1455 engine->name);
1456 }
1457}
1458
9593a657 1459static bool i915_reset_complete(struct pci_dev *pdev)
1460{
1461 u8 gdrst;
9593a657 1462
dc97997a 1463 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1464 return (gdrst & GRDOM_RESET_STATUS) == 0;
1465}
1466
dc97997a 1467static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
907b28c5 1468{
91c8a326 1469 struct pci_dev *pdev = dev_priv->drm.pdev;
dc97997a 1470
73bbf6bd 1471 /* assert reset for at least 20 usec */
dc97997a 1472 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
9593a657 1473 usleep_range(50, 200);
dc97997a 1474 pci_write_config_byte(pdev, I915_GDRST, 0);
907b28c5 1475
dc97997a 1476 return wait_for(i915_reset_complete(pdev), 500);
1477}
1478
9593a657 1479static bool g4x_reset_complete(struct pci_dev *pdev)
1480{
1481 u8 gdrst;
9593a657 1482
dc97997a 1483 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1484 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1485}
1486
dc97997a 1487static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
408d4b9e 1488{
91c8a326 1489 struct pci_dev *pdev = dev_priv->drm.pdev;
9593a657 1490
1491 /* Stop engines before we reset; see g4x_do_reset() below for why. */
1492 gen3_stop_rings(dev_priv);
1493
1494 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1495 return wait_for(g4x_reset_complete(pdev), 500);
1496}
1497
dc97997a 1498static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
fa4f53c4 1499{
91c8a326 1500 struct pci_dev *pdev = dev_priv->drm.pdev;
1501 int ret;
1502
fa4f53c4 1503 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1504 I915_WRITE(VDECCLK_GATE_D,
1505 I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1506 POSTING_READ(VDECCLK_GATE_D);
1507
1508 /* We stop engines, otherwise we might get failed reset and a
1509 * dead gpu (on elk).
1510 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
1511 */
1512 gen3_stop_rings(dev_priv);
1513
dc97997a 1514 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1515 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
dc97997a 1516 ret = wait_for(g4x_reset_complete(pdev), 500);
1517 if (ret) {
1518 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
44e1e7ba 1519 goto out;
9593a657 1520 }
fa4f53c4 1521
1522 pci_write_config_byte(pdev, I915_GDRST,
1523 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1524 ret = wait_for(g4x_reset_complete(pdev), 500);
1525 if (ret) {
1526 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1527 goto out;
1528 }
fa4f53c4 1529
9593a657 1530out:
dc97997a 1531 pci_write_config_byte(pdev, I915_GDRST, 0);
1532
1533 I915_WRITE(VDECCLK_GATE_D,
1534 I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1535 POSTING_READ(VDECCLK_GATE_D);
1536
9593a657 1537 return ret;
1538}
1539
1540static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1541 unsigned engine_mask)
907b28c5 1542{
1543 int ret;
1544
9593a657 1545 I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1546 ret = intel_wait_for_register(dev_priv,
1547 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1548 500);
1549 if (ret) {
1550 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1551 goto out;
1552 }
907b28c5 1553
9593a657 1554 I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1555 ret = intel_wait_for_register(dev_priv,
1556 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1557 500);
1558 if (ret) {
1559 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1560 goto out;
1561 }
9aa7250f 1562
9593a657 1563out:
c039b7f2 1564 I915_WRITE(ILK_GDSR, 0);
1565 POSTING_READ(ILK_GDSR);
1566 return ret;
1567}
1568
1569/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1570static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1571 u32 hw_domain_mask)
907b28c5 1572{
1573 int err;
1574
1575 /* GEN6_GDRST is not in the gt power well, no need to check
1576 * for fifo space for the write or forcewake the chip for
1577 * the read
1578 */
ee4b6faf 1579 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
907b28c5 1580
a3662830 1581 /* Wait for the device to ack the reset requests */
9593a657 1582 err = intel_wait_for_register_fw(dev_priv,
4a17fe13
CW
1583 GEN6_GDRST, hw_domain_mask, 0,
1584 500);
9593a657
CW
1585 if (err)
1586 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
1587 hw_domain_mask);
1588
1589 return err;
ee4b6faf
MK
1590}
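/*
 * Illustrative sketch (not part of the driver): the hw_domain_mask argument
 * above is a bitwise OR of GEN*_GRDOM_* bits, e.g. a combined render + media
 * soft-domain reset. Real callers derive the mask from an engine mask, as
 * gen6_reset_engines() below shows; this helper exists only as an example.
 */
static int __maybe_unused
example_reset_render_and_media_domains(struct drm_i915_private *dev_priv)
{
	return gen6_hw_domain_reset(dev_priv,
				    GEN6_GRDOM_RENDER | GEN6_GRDOM_MEDIA);
}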

/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
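/*
 * Illustrative sketch (not part of the driver): building an engine_mask for
 * the reset paths above via the public intel_gpu_reset() entry point.
 * ENGINE_MASK() and ALL_ENGINES come from i915_drv.h; the chosen engines are
 * only an example.
 */
static int __maybe_unused
example_reset_render_then_everything(struct drm_i915_private *dev_priv)
{
	/* Try to reset just the render and blitter engines... */
	int err = intel_gpu_reset(dev_priv,
				  ENGINE_MASK(RCS) | ENGINE_MASK(BCS));

	/* ...and escalate to a full-chip reset if that fails. */
	if (err)
		err = intel_gpu_reset(dev_priv, ALL_ENGINES);

	return err;
}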

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 * (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
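/*
 * Illustrative sketch (not part of the driver): calling
 * __intel_wait_for_register_fw() from a context that already holds the
 * required forcewake, using only the fast (atomic) timeout. The register,
 * mask and timeout values are placeholders chosen for the example.
 */
static int __maybe_unused
example_poll_gdrst_fw(struct drm_i915_private *dev_priv)
{
	u32 seen;
	int err;

	/* The caller is assumed to hold forcewake already at this point. */
	err = __intel_wait_for_register_fw(dev_priv,
					   GEN6_GDRST,	/* register to poll */
					   GEN6_GRDOM_FULL, /* mask */
					   0,		/* expected value */
					   500,		/* fast timeout, in us */
					   0,		/* no sleeping wait */
					   &seen);
	if (err)
		DRM_DEBUG_DRIVER("example: GDRST still 0x%08x\n", seen);

	return err;
}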

/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 * (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
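/*
 * Illustrative sketch (not part of the driver): the sleeping variant above
 * takes and drops the needed forcewake internally, so a caller in process
 * context only provides the register condition and a timeout. The register
 * and the 50ms timeout are placeholder values.
 */
static int __maybe_unused
example_wait_gdrst_idle(struct drm_i915_private *dev_priv)
{
	return intel_wait_for_register(dev_priv,
				       GEN6_GDRST, GEN6_GRDOM_FULL, 0,
				       50);
}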

static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}

typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int retry;
	int ret;

	might_sleep();

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {
		ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
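/*
 * Illustrative sketch (not part of the driver): the intended pattern for the
 * helper above, as described in its comment: query the domains needed for a
 * raw read, take them, use the _FW accessor, and release them. The register
 * argument is a placeholder supplied by a hypothetical caller.
 */
static u32 __maybe_unused
example_raw_read(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	intel_uncore_forcewake_get(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put(dev_priv, fw);

	return val;
}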

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif