/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

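/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * must keep the GT powered across a multi-register sequence would pair the
 * calls like this:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	...several I915_READ_FW()/I915_WRITE_FW() accesses...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * The regular accessors take forcewake automatically; the explicit get/put
 * only guarantees the domains are not released in between.
 */
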
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	return entry ? entry->domains : 0;
}

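/*
 * Worked example (illustrative): with the __vlv_fw_ranges table below,
 * find_fw_domain(dev_priv, 0x2100) lands in the 0x2000-0x3fff entry and
 * returns FORCEWAKE_RENDER, while an offset matching no entry (or an entry
 * with a 0 domain mask) returns 0, so the access needs no forcewake.
 */
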
static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

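/*
 * Example (illustrative): RING_TAIL(RENDER_RING_BASE) is listed in
 * gen8_shadowed_regs above, so __gen8_reg_write_fw_domains() evaluates to 0
 * for that offset and the write skips forcewake entirely; a non-shadowed
 * register below 0x40000 evaluates to FORCEWAKE_RENDER instead.
 */
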
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

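/*
 * Rough sketch of what the instantiations above generate (illustrative, not
 * literal preprocessor output): __fwtable_read(32) defines
 *
 *	static u32 fwtable_read32(struct drm_i915_private *dev_priv,
 *				  i915_reg_t reg, bool trace)
 *	{
 *		enum forcewake_domains fw_engine;
 *		GEN6_READ_HEADER(32);
 *		fw_engine = __fwtable_reg_read_fw_domains(offset);
 *		if (fw_engine)
 *			__force_wake_auto(dev_priv, fw_engine);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		GEN6_READ_FOOTER;
 *	}
 *
 * and these generated accessors are what ASSIGN_READ_MMIO_VFUNCS(fwtable)
 * plugs into dev_priv->uncore.funcs later in intel_uncore_init().
 */
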
8a74db7a
VS
952#define VGPU_READ_HEADER(x) \
953 unsigned long irqflags; \
954 u##x val = 0; \
da5827c3 955 assert_rpm_device_not_suspended(dev_priv); \
8a74db7a
VS
956 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
957
958#define VGPU_READ_FOOTER \
959 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
960 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
961 return val
962
963#define __vgpu_read(x) \
964static u##x \
f0f59a00 965vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
8a74db7a
VS
966 VGPU_READ_HEADER(x); \
967 val = __raw_i915_read##x(dev_priv, reg); \
968 VGPU_READ_FOOTER; \
969}
970
971__vgpu_read(8)
972__vgpu_read(16)
973__vgpu_read(32)
974__vgpu_read(64)
975
976#undef __vgpu_read
977#undef VGPU_READ_FOOTER
978#undef VGPU_READ_HEADER
979
51f67885 980#define GEN2_WRITE_HEADER \
5d738795 981 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 982 assert_rpm_wakelock_held(dev_priv); \
907b28c5 983
51f67885 984#define GEN2_WRITE_FOOTER
0d965301 985
51f67885 986#define __gen2_write(x) \
0b274481 987static void \
f0f59a00 988gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 989 GEN2_WRITE_HEADER; \
4032ef43 990 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 991 GEN2_WRITE_FOOTER; \
4032ef43
BW
992}
993
994#define __gen5_write(x) \
995static void \
f0f59a00 996gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 997 GEN2_WRITE_HEADER; \
4032ef43
BW
998 ilk_dummy_write(dev_priv); \
999 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1000 GEN2_WRITE_FOOTER; \
4032ef43
BW
1001}
1002
51f67885
CW
1003__gen5_write(8)
1004__gen5_write(16)
1005__gen5_write(32)
51f67885
CW
1006__gen2_write(8)
1007__gen2_write(16)
1008__gen2_write(32)
51f67885
CW
1009
1010#undef __gen5_write
1011#undef __gen2_write
1012
1013#undef GEN2_WRITE_FOOTER
1014#undef GEN2_WRITE_HEADER
1015
1016#define GEN6_WRITE_HEADER \
f0f59a00 1017 u32 offset = i915_mmio_reg_offset(reg); \
51f67885
CW
1018 unsigned long irqflags; \
1019 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1020 assert_rpm_wakelock_held(dev_priv); \
9c053501
MK
1021 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1022 unclaimed_reg_debug(dev_priv, reg, false, true)
51f67885
CW
1023
1024#define GEN6_WRITE_FOOTER \
9c053501 1025 unclaimed_reg_debug(dev_priv, reg, false, false); \
51f67885
CW
1026 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1027
4032ef43
BW
1028#define __gen6_write(x) \
1029static void \
f0f59a00 1030gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
4032ef43 1031 u32 __fifo_ret = 0; \
51f67885 1032 GEN6_WRITE_HEADER; \
0670c5a6 1033 if (NEEDS_FORCE_WAKE(offset)) { \
4032ef43
BW
1034 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1035 } \
1036 __raw_i915_write##x(dev_priv, reg, val); \
1037 if (unlikely(__fifo_ret)) { \
1038 gen6_gt_check_fifodbg(dev_priv); \
1039 } \
51f67885 1040 GEN6_WRITE_FOOTER; \
4032ef43
BW
1041}
1042
ab2aa47e
BW
1043#define __gen8_write(x) \
1044static void \
f0f59a00 1045gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 1046 enum forcewake_domains fw_engine; \
51f67885 1047 GEN6_WRITE_HEADER; \
6863b76c
TU
1048 fw_engine = __gen8_reg_write_fw_domains(offset); \
1049 if (fw_engine) \
1050 __force_wake_auto(dev_priv, fw_engine); \
b2cff0db 1051 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1052 GEN6_WRITE_FOOTER; \
ab2aa47e
BW
1053}
1054
22d48c55 1055#define __fwtable_write(x) \
1938e59a 1056static void \
22d48c55 1057fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 1058 enum forcewake_domains fw_engine; \
51f67885 1059 GEN6_WRITE_HEADER; \
22d48c55 1060 fw_engine = __fwtable_reg_write_fw_domains(offset); \
6a42d0f4 1061 if (fw_engine) \
b208ba8e 1062 __force_wake_auto(dev_priv, fw_engine); \
1938e59a 1063 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1064 GEN6_WRITE_FOOTER; \
1938e59a
D
1065}
1066
22d48c55
TU
1067__fwtable_write(8)
1068__fwtable_write(16)
1069__fwtable_write(32)
ab2aa47e
BW
1070__gen8_write(8)
1071__gen8_write(16)
1072__gen8_write(32)
4032ef43
BW
1073__gen6_write(8)
1074__gen6_write(16)
1075__gen6_write(32)
4032ef43 1076
22d48c55 1077#undef __fwtable_write
ab2aa47e 1078#undef __gen8_write
4032ef43 1079#undef __gen6_write
51f67885
CW
1080#undef GEN6_WRITE_FOOTER
1081#undef GEN6_WRITE_HEADER
907b28c5 1082
8a74db7a
VS
1083#define VGPU_WRITE_HEADER \
1084 unsigned long irqflags; \
1085 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1086 assert_rpm_device_not_suspended(dev_priv); \
8a74db7a
VS
1087 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
1088
1089#define VGPU_WRITE_FOOTER \
1090 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1091
1092#define __vgpu_write(x) \
1093static void vgpu_write##x(struct drm_i915_private *dev_priv, \
f0f59a00 1094 i915_reg_t reg, u##x val, bool trace) { \
8a74db7a
VS
1095 VGPU_WRITE_HEADER; \
1096 __raw_i915_write##x(dev_priv, reg, val); \
1097 VGPU_WRITE_FOOTER; \
1098}
1099
1100__vgpu_write(8)
1101__vgpu_write(16)
1102__vgpu_write(32)
8a74db7a
VS
1103
1104#undef __vgpu_write
1105#undef VGPU_WRITE_FOOTER
1106#undef VGPU_WRITE_HEADER
1107
43d942a7
YZ
1108#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1109do { \
1110 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1111 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1112 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
43d942a7
YZ
1113} while (0)
1114
1115#define ASSIGN_READ_MMIO_VFUNCS(x) \
1116do { \
1117 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1118 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1119 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1120 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1121} while (0)
1122
05a2fb15
MK
1123
1124static void fw_domain_init(struct drm_i915_private *dev_priv,
48c1026a 1125 enum forcewake_domain_id domain_id,
f0f59a00
VS
1126 i915_reg_t reg_set,
1127 i915_reg_t reg_ack)
05a2fb15
MK
1128{
1129 struct intel_uncore_forcewake_domain *d;
1130
1131 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1132 return;
1133
1134 d = &dev_priv->uncore.fw_domain[domain_id];
1135
1136 WARN_ON(d->wake_count);
1137
1138 d->wake_count = 0;
1139 d->reg_set = reg_set;
1140 d->reg_ack = reg_ack;
1141
1142 if (IS_GEN6(dev_priv)) {
1143 d->val_reset = 0;
1144 d->val_set = FORCEWAKE_KERNEL;
1145 d->val_clear = 0;
1146 } else {
8543747c 1147 /* WaRsClearFWBitsAtReset:bdw,skl */
05a2fb15
MK
1148 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1149 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1150 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1151 }
1152
666a4537 1153 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
05a2fb15
MK
1154 d->reg_post = FORCEWAKE_ACK_VLV;
1155 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1156 d->reg_post = ECOBUS;
05a2fb15
MK
1157
1158 d->i915 = dev_priv;
1159 d->id = domain_id;
1160
33c582c1
TU
1161 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1162 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1163 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1164
1165 d->mask = 1 << domain_id;
1166
a57a4a67
TU
1167 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1168 d->timer.function = intel_uncore_fw_release_timer;
05a2fb15
MK
1169
1170 dev_priv->uncore.fw_domains |= (1 << domain_id);
f9b3927a
MK
1171
1172 fw_domain_reset(d);
05a2fb15
MK
1173}
1174
dc97997a 1175static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
0b274481 1176{
2d1fe073 1177 if (INTEL_INFO(dev_priv)->gen <= 5)
3225b2f9
MK
1178 return;
1179
dc97997a 1180 if (IS_GEN9(dev_priv)) {
05a2fb15
MK
1181 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1182 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1183 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1184 FORCEWAKE_RENDER_GEN9,
1185 FORCEWAKE_ACK_RENDER_GEN9);
1186 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1187 FORCEWAKE_BLITTER_GEN9,
1188 FORCEWAKE_ACK_BLITTER_GEN9);
1189 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1190 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
dc97997a 1191 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
05a2fb15 1192 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dc97997a 1193 if (!IS_CHERRYVIEW(dev_priv))
756c349d
MK
1194 dev_priv->uncore.funcs.force_wake_put =
1195 fw_domains_put_with_fifo;
1196 else
1197 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
05a2fb15
MK
1198 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1199 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1200 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1201 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
dc97997a 1202 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
05a2fb15
MK
1203 dev_priv->uncore.funcs.force_wake_get =
1204 fw_domains_get_with_thread_status;
dc97997a 1205 if (IS_HASWELL(dev_priv))
3d7d0c85
VS
1206 dev_priv->uncore.funcs.force_wake_put =
1207 fw_domains_put_with_fifo;
1208 else
1209 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
05a2fb15
MK
1210 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1211 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
dc97997a 1212 } else if (IS_IVYBRIDGE(dev_priv)) {
0b274481
BW
1213 u32 ecobus;
1214
1215 /* IVB configs may use multi-threaded forcewake */
1216
1217 /* A small trick here - if the bios hasn't configured
1218 * MT forcewake, and if the device is in RC6, then
1219 * force_wake_mt_get will not wake the device and the
1220 * ECOBUS read will return zero. Which will be
1221 * (correctly) interpreted by the test below as MT
1222 * forcewake being disabled.
1223 */
05a2fb15
MK
1224 dev_priv->uncore.funcs.force_wake_get =
1225 fw_domains_get_with_thread_status;
1226 dev_priv->uncore.funcs.force_wake_put =
1227 fw_domains_put_with_fifo;
1228
f9b3927a
MK
1229 /* We need to init first for ECOBUS access and then
1230 * determine later if we want to reinit, in case of MT access is
6ea2556f
MK
1231 * not working. In this stage we don't know which flavour this
1232 * ivb is, so it is better to reset also the gen6 fw registers
1233 * before the ecobus check.
f9b3927a 1234 */
6ea2556f
MK
1235
1236 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1237 __raw_posting_read(dev_priv, ECOBUS);
1238
05a2fb15
MK
1239 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1240 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
f9b3927a 1241
556ab7a6 1242 spin_lock_irq(&dev_priv->uncore.lock);
05a2fb15 1243 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
0b274481 1244 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
05a2fb15 1245 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
556ab7a6 1246 spin_unlock_irq(&dev_priv->uncore.lock);
0b274481 1247
05a2fb15 1248 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
0b274481
BW
1249 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1250 DRM_INFO("when using vblank-synced partial screen updates.\n");
05a2fb15
MK
1251 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1252 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1253 }
dc97997a 1254 } else if (IS_GEN6(dev_priv)) {
0b274481 1255 dev_priv->uncore.funcs.force_wake_get =
05a2fb15 1256 fw_domains_get_with_thread_status;
0b274481 1257 dev_priv->uncore.funcs.force_wake_put =
05a2fb15
MK
1258 fw_domains_put_with_fifo;
1259 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1260 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1261 }
3225b2f9
MK
1262
1263 /* All future platforms are expected to require complex power gating */
1264 WARN_ON(dev_priv->uncore.fw_domains == 0);
f9b3927a
MK
1265}
1266
15157970
TU
1267#define ASSIGN_FW_DOMAINS_TABLE(d) \
1268{ \
1269 dev_priv->uncore.fw_domains_table = \
1270 (struct intel_forcewake_range *)(d); \
1271 dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1272}
1273
dc97997a 1274void intel_uncore_init(struct drm_i915_private *dev_priv)
f9b3927a 1275{
dc97997a 1276 i915_check_vgpu(dev_priv);
cf9d2890 1277
3accaf7e 1278 intel_uncore_edram_detect(dev_priv);
dc97997a
CW
1279 intel_uncore_fw_domains_init(dev_priv);
1280 __intel_uncore_early_sanitize(dev_priv, false);
0b274481 1281
75714940
MK
1282 dev_priv->uncore.unclaimed_mmio_check = 1;
1283
dc97997a 1284 switch (INTEL_INFO(dev_priv)->gen) {
ab2aa47e 1285 default:
4597a88a 1286 case 9:
15157970 1287 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
22d48c55 1288 ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
6044c4a3 1289 ASSIGN_READ_MMIO_VFUNCS(fwtable);
4597a88a
ZW
1290 break;
1291 case 8:
dc97997a 1292 if (IS_CHERRYVIEW(dev_priv)) {
15157970 1293 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
22d48c55 1294 ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
6044c4a3 1295 ASSIGN_READ_MMIO_VFUNCS(fwtable);
1938e59a
D
1296
1297 } else {
43d942a7
YZ
1298 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1299 ASSIGN_READ_MMIO_VFUNCS(gen6);
1938e59a 1300 }
ab2aa47e 1301 break;
3967018e
BW
1302 case 7:
1303 case 6:
e9b825f4 1304 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
940aece4 1305
dc97997a 1306 if (IS_VALLEYVIEW(dev_priv)) {
15157970 1307 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
6044c4a3 1308 ASSIGN_READ_MMIO_VFUNCS(fwtable);
940aece4 1309 } else {
43d942a7 1310 ASSIGN_READ_MMIO_VFUNCS(gen6);
940aece4 1311 }
3967018e
BW
1312 break;
1313 case 5:
43d942a7
YZ
1314 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1315 ASSIGN_READ_MMIO_VFUNCS(gen5);
3967018e
BW
1316 break;
1317 case 4:
1318 case 3:
1319 case 2:
51f67885
CW
1320 ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1321 ASSIGN_READ_MMIO_VFUNCS(gen2);
3967018e
BW
1322 break;
1323 }
ed493883 1324
15157970 1325 intel_fw_table_check(dev_priv);
47188574
TU
1326 if (INTEL_GEN(dev_priv) >= 8)
1327 intel_shadow_table_check();
15157970 1328
c033666a 1329 if (intel_vgpu_active(dev_priv)) {
3be0bf5a
YZ
1330 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1331 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1332 }
1333
dc97997a 1334 i915_check_and_clear_faults(dev_priv);
0b274481 1335}
43d942a7
YZ
1336#undef ASSIGN_WRITE_MMIO_VFUNCS
1337#undef ASSIGN_READ_MMIO_VFUNCS
0b274481 1338
dc97997a 1339void intel_uncore_fini(struct drm_i915_private *dev_priv)
0b274481 1340{
0b274481 1341 /* Paranoia: make sure we have disabled everything before we exit. */
dc97997a
CW
1342 intel_uncore_sanitize(dev_priv);
1343 intel_uncore_forcewake_reset(dev_priv, false);
0b274481
BW
1344}
1345
ae5702d2 1346#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
af76ae44 1347
907b28c5 1348static const struct register_whitelist {
f0f59a00 1349 i915_reg_t offset_ldw, offset_udw;
907b28c5 1350 uint32_t size;
af76ae44
DL
1351 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1352 uint32_t gen_bitmask;
907b28c5 1353} whitelist[] = {
8697600b
VS
1354 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1355 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1356 .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
907b28c5
CW
1357};
1358
1359int i915_reg_read_ioctl(struct drm_device *dev,
1360 void *data, struct drm_file *file)
1361{
fac5e23e 1362 struct drm_i915_private *dev_priv = to_i915(dev);
907b28c5
CW
1363 struct drm_i915_reg_read *reg = data;
1364 struct register_whitelist const *entry = whitelist;
648a9bc5 1365 unsigned size;
f0f59a00 1366 i915_reg_t offset_ldw, offset_udw;
cf67c70f 1367 int i, ret = 0;
907b28c5
CW
1368
1369 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
f0f59a00 1370 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
ae5702d2 1371 (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
907b28c5
CW
1372 break;
1373 }
1374
1375 if (i == ARRAY_SIZE(whitelist))
1376 return -EINVAL;
1377
648a9bc5
CW
1378 /* We use the low bits to encode extra flags as the register should
1379 * be naturally aligned (and those that are not so aligned merely
1380 * limit the available flags for that register).
1381 */
8697600b
VS
1382 offset_ldw = entry->offset_ldw;
1383 offset_udw = entry->offset_udw;
648a9bc5 1384 size = entry->size;
f0f59a00 1385 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
648a9bc5 1386
cf67c70f
PZ
1387 intel_runtime_pm_get(dev_priv);
1388
648a9bc5
CW
1389 switch (size) {
1390 case 8 | 1:
8697600b 1391 reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
648a9bc5 1392 break;
907b28c5 1393 case 8:
8697600b 1394 reg->val = I915_READ64(offset_ldw);
907b28c5
CW
1395 break;
1396 case 4:
8697600b 1397 reg->val = I915_READ(offset_ldw);
907b28c5
CW
1398 break;
1399 case 2:
8697600b 1400 reg->val = I915_READ16(offset_ldw);
907b28c5
CW
1401 break;
1402 case 1:
8697600b 1403 reg->val = I915_READ8(offset_ldw);
907b28c5
CW
1404 break;
1405 default:
cf67c70f
PZ
1406 ret = -EINVAL;
1407 goto out;
907b28c5
CW
1408 }
1409
cf67c70f
PZ
1410out:
1411 intel_runtime_pm_put(dev_priv);
1412 return ret;
907b28c5
CW
1413}
1414
dc97997a 1415static int i915_reset_complete(struct pci_dev *pdev)
907b28c5
CW
1416{
1417 u8 gdrst;
dc97997a 1418 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1419 return (gdrst & GRDOM_RESET_STATUS) == 0;
907b28c5
CW
1420}
1421
dc97997a 1422static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
907b28c5 1423{
91c8a326 1424 struct pci_dev *pdev = dev_priv->drm.pdev;
dc97997a 1425
73bbf6bd 1426 /* assert reset for at least 20 usec */
dc97997a 1427 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
73bbf6bd 1428 udelay(20);
dc97997a 1429 pci_write_config_byte(pdev, I915_GDRST, 0);
907b28c5 1430
dc97997a 1431 return wait_for(i915_reset_complete(pdev), 500);
73bbf6bd
VS
1432}
1433
dc97997a 1434static int g4x_reset_complete(struct pci_dev *pdev)
73bbf6bd
VS
1435{
1436 u8 gdrst;
dc97997a 1437 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1438 return (gdrst & GRDOM_RESET_ENABLE) == 0;
907b28c5
CW
1439}
1440
dc97997a 1441static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
408d4b9e 1442{
91c8a326 1443 struct pci_dev *pdev = dev_priv->drm.pdev;
dc97997a
CW
1444 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1445 return wait_for(g4x_reset_complete(pdev), 500);
408d4b9e
VS
1446}
1447
dc97997a 1448static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
fa4f53c4 1449{
91c8a326 1450 struct pci_dev *pdev = dev_priv->drm.pdev;
fa4f53c4
VS
1451 int ret;
1452
dc97997a 1453 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1454 GRDOM_RENDER | GRDOM_RESET_ENABLE);
dc97997a 1455 ret = wait_for(g4x_reset_complete(pdev), 500);
fa4f53c4
VS
1456 if (ret)
1457 return ret;
1458
1459 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1460 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1461 POSTING_READ(VDECCLK_GATE_D);
1462
dc97997a 1463 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1464 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
dc97997a 1465 ret = wait_for(g4x_reset_complete(pdev), 500);
fa4f53c4
VS
1466 if (ret)
1467 return ret;
1468
1469 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1470 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1471 POSTING_READ(VDECCLK_GATE_D);
1472
dc97997a 1473 pci_write_config_byte(pdev, I915_GDRST, 0);
fa4f53c4
VS
1474
1475 return 0;
1476}
1477
dc97997a
CW
1478static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1479 unsigned engine_mask)
907b28c5 1480{
907b28c5
CW
1481 int ret;
1482
c039b7f2 1483 I915_WRITE(ILK_GDSR,
0f08ffd6 1484 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
87273b71
CW
1485 ret = intel_wait_for_register(dev_priv,
1486 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1487 500);
907b28c5
CW
1488 if (ret)
1489 return ret;
1490
c039b7f2 1491 I915_WRITE(ILK_GDSR,
0f08ffd6 1492 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
87273b71
CW
1493 ret = intel_wait_for_register(dev_priv,
1494 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1495 500);
9aa7250f
VS
1496 if (ret)
1497 return ret;
1498
c039b7f2 1499 I915_WRITE(ILK_GDSR, 0);
9aa7250f
VS
1500
1501 return 0;
907b28c5
CW
1502}
1503
ee4b6faf
MK
1504/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1505static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1506 u32 hw_domain_mask)
907b28c5 1507{
907b28c5
CW
1508 /* GEN6_GDRST is not in the gt power well, no need to check
1509 * for fifo space for the write or forcewake the chip for
1510 * the read
1511 */
ee4b6faf 1512 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
907b28c5 1513
ee4b6faf 1514 /* Spin waiting for the device to ack the reset requests */
4a17fe13
CW
1515 return intel_wait_for_register_fw(dev_priv,
1516 GEN6_GDRST, hw_domain_mask, 0,
1517 500);
ee4b6faf
MK
1518}
1519
1520/**
1521 * gen6_reset_engines - reset individual engines
dc97997a 1522 * @dev_priv: i915 device
ee4b6faf
MK
1523 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1524 *
1525 * This function will reset the individual engines that are set in engine_mask.
1526 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1527 *
1528 * Note: It is responsibility of the caller to handle the difference between
1529 * asking full domain reset versus reset for all available individual engines.
1530 *
1531 * Returns 0 on success, nonzero on error.
1532 */
dc97997a
CW
1533static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1534 unsigned engine_mask)
ee4b6faf 1535{
ee4b6faf
MK
1536 struct intel_engine_cs *engine;
1537 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1538 [RCS] = GEN6_GRDOM_RENDER,
1539 [BCS] = GEN6_GRDOM_BLT,
1540 [VCS] = GEN6_GRDOM_MEDIA,
1541 [VCS2] = GEN8_GRDOM_MEDIA2,
1542 [VECS] = GEN6_GRDOM_VECS,
1543 };
1544 u32 hw_mask;
1545 int ret;
1546
1547 if (engine_mask == ALL_ENGINES) {
1548 hw_mask = GEN6_GRDOM_FULL;
1549 } else {
bafb0fce
CW
1550 unsigned int tmp;
1551
ee4b6faf 1552 hw_mask = 0;
bafb0fce 1553 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
ee4b6faf
MK
1554 hw_mask |= hw_engine_mask[engine->id];
1555 }
1556
1557 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
907b28c5 1558
dc97997a 1559 intel_uncore_forcewake_reset(dev_priv, true);
5babf0fc 1560
907b28c5
CW
1561 return ret;
1562}
1563
1758b90e
CW
1564/**
1565 * intel_wait_for_register_fw - wait until register matches expected state
1566 * @dev_priv: the i915 device
1567 * @reg: the register to read
1568 * @mask: mask to apply to register value
1569 * @value: expected value
1570 * @timeout_ms: timeout in millisecond
1571 *
1572 * This routine waits until the target register @reg contains the expected
3d466cd6
DV
1573 * @value after applying the @mask, i.e. it waits until ::
1574 *
1575 * (I915_READ_FW(reg) & mask) == value
1576 *
1758b90e
CW
1577 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
1578 *
1579 * Note that this routine assumes the caller holds forcewake asserted, it is
1580 * not suitable for very long waits. See intel_wait_for_register() if you
1581 * wish to wait without holding forcewake for the duration (i.e. you expect
1582 * the wait to be slow).
1583 *
1584 * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
1585 */
1586int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1587 i915_reg_t reg,
1588 const u32 mask,
1589 const u32 value,
1590 const unsigned long timeout_ms)
1591{
1592#define done ((I915_READ_FW(reg) & mask) == value)
1593 int ret = wait_for_us(done, 2);
1594 if (ret)
1595 ret = wait_for(done, timeout_ms);
1596 return ret;
1597#undef done
1598}
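
/*
 * Illustrative sketch, not part of the original file: a caller that already
 * holds forcewake waiting for an engine to report idle. RING_MI_MODE() and
 * MODE_IDLE are existing register definitions; the 1 ms timeout is an
 * assumed value for the example.
 */
static int __maybe_unused
example_wait_for_ring_idle_fw(struct drm_i915_private *dev_priv,
			      struct intel_engine_cs *engine)
{
	return intel_wait_for_register_fw(dev_priv,
					  RING_MI_MODE(engine->mmio_base),
					  MODE_IDLE, MODE_IDLE,
					  1);
}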
1599
1600/**
1601 * intel_wait_for_register - wait until register matches expected state
1602 * @dev_priv: the i915 device
1603 * @reg: the register to read
1604 * @mask: mask to apply to register value
1605 * @value: expected value
1606 * @timeout_ms: timeout in milliseconds
1607 *
1608 * This routine waits until the target register @reg contains the expected
3d466cd6
DV
1609 * @value after applying the @mask, i.e. it waits until ::
1610 *
1611 * (I915_READ(reg) & mask) == value
1612 *
1758b90e
CW
1613 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1614 *
1615 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1616 */
1617int intel_wait_for_register(struct drm_i915_private *dev_priv,
1618 i915_reg_t reg,
1619 const u32 mask,
1620 const u32 value,
1621 const unsigned long timeout_ms)
7fd2d269 1622{
1758b90e
CW
1623
1624 unsigned fw =
1625 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1626 int ret;
1627
1628 intel_uncore_forcewake_get(dev_priv, fw);
1629 ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1630 intel_uncore_forcewake_put(dev_priv, fw);
1631 if (ret)
1632 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1633 timeout_ms);
1634
1635 return ret;
d431440c
TE
1636}
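
/*
 * Illustrative sketch, not part of the original file: waiting for a PLL to
 * report lock without holding forcewake for the whole wait. LCPLL_CTL and
 * LCPLL_PLL_LOCK are existing register definitions; the 5 ms timeout is an
 * assumed value for the example.
 */
static int __maybe_unused
example_wait_for_lcpll_lock(struct drm_i915_private *dev_priv)
{
	return intel_wait_for_register(dev_priv,
				       LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				       5);
}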
1637
1638static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1639{
c033666a 1640 struct drm_i915_private *dev_priv = engine->i915;
d431440c 1641 int ret;
d431440c
TE
1642
1643 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1644 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1645
1758b90e
CW
1646 ret = intel_wait_for_register_fw(dev_priv,
1647 RING_RESET_CTL(engine->mmio_base),
1648 RESET_CTL_READY_TO_RESET,
1649 RESET_CTL_READY_TO_RESET,
1650 700);
d431440c
TE
1651 if (ret)
1652 DRM_ERROR("%s: reset request timeout\n", engine->name);
1653
1654 return ret;
1655}
1656
1657static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1658{
c033666a 1659 struct drm_i915_private *dev_priv = engine->i915;
d431440c
TE
1660
1661 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1662 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
7fd2d269
MK
1663}
1664
dc97997a
CW
1665static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1666 unsigned engine_mask)
7fd2d269 1667{
7fd2d269 1668 struct intel_engine_cs *engine;
bafb0fce 1669 unsigned int tmp;
7fd2d269 1670
bafb0fce 1671 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
d431440c 1672 if (gen8_request_engine_reset(engine))
7fd2d269 1673 goto not_ready;
7fd2d269 1674
dc97997a 1675 return gen6_reset_engines(dev_priv, engine_mask);
7fd2d269
MK
1676
1677not_ready:
bafb0fce 1678 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
d431440c 1679 gen8_unrequest_engine_reset(engine);
7fd2d269
MK
1680
1681 return -EIO;
1682}
1683
dc97997a
CW
1684typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1685
1686static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
907b28c5 1687{
b1330fbb
CW
1688 if (!i915.reset)
1689 return NULL;
1690
dc97997a 1691 if (INTEL_INFO(dev_priv)->gen >= 8)
ee4b6faf 1692 return gen8_reset_engines;
dc97997a 1693 else if (INTEL_INFO(dev_priv)->gen >= 6)
ee4b6faf 1694 return gen6_reset_engines;
dc97997a 1695 else if (IS_GEN5(dev_priv))
49e4d842 1696 return ironlake_do_reset;
dc97997a 1697 else if (IS_G4X(dev_priv))
49e4d842 1698 return g4x_do_reset;
dc97997a 1699 else if (IS_G33(dev_priv))
49e4d842 1700 return g33_do_reset;
dc97997a 1701 else if (INTEL_INFO(dev_priv)->gen >= 3)
49e4d842 1702 return i915_do_reset;
542c184f 1703 else
49e4d842
CW
1704 return NULL;
1705}
1706
dc97997a 1707int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
49e4d842 1708{
dc97997a 1709 reset_func reset;
99106bc1 1710 int ret;
49e4d842 1711
dc97997a 1712 reset = intel_get_gpu_reset(dev_priv);
49e4d842 1713 if (reset == NULL)
542c184f 1714 return -ENODEV;
49e4d842 1715
99106bc1
MK
1716 /* If the power well sleeps during the reset, the reset
1717 * request may be dropped and never complete (causing -EIO).
1718 */
1719 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dc97997a 1720 ret = reset(dev_priv, engine_mask);
99106bc1
MK
1721 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1722
1723 return ret;
49e4d842
CW
1724}
1725
dc97997a 1726bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
49e4d842 1727{
dc97997a 1728 return intel_get_gpu_reset(dev_priv) != NULL;
907b28c5
CW
1729}
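
/*
 * Illustrative sketch, not part of the original file: how a recovery path
 * might combine the two helpers above, bailing out with -ENODEV when the
 * platform has no reset support and otherwise requesting a full reset.
 */
static int __maybe_unused
example_full_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!intel_has_gpu_reset(dev_priv))
		return -ENODEV;

	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}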
1730
6b332fa2
AS
1731int intel_guc_reset(struct drm_i915_private *dev_priv)
1732{
1733 int ret;
1734 unsigned long irqflags;
1735
1a3d1898 1736 if (!HAS_GUC(dev_priv))
6b332fa2
AS
1737 return -EINVAL;
1738
1739 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1740 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1741
1742 ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1743
1744 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1745 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1746
1747 return ret;
1748}
1749
fc97618b 1750bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
907b28c5 1751{
fc97618b 1752 return check_for_unclaimed_mmio(dev_priv);
907b28c5 1753}
75714940 1754
bc3b9346 1755bool
75714940
MK
1756intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1757{
1758 if (unlikely(i915.mmio_debug ||
1759 dev_priv->uncore.unclaimed_mmio_check <= 0))
bc3b9346 1760 return false;
75714940
MK
1761
1762 if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1763 DRM_DEBUG("Unclaimed register detected, "
1764 "enabling oneshot unclaimed register reporting. "
1765 "Please use i915.mmio_debug=N for more information.\n");
1766 i915.mmio_debug++;
1767 dev_priv->uncore.unclaimed_mmio_check--;
bc3b9346 1768 return true;
75714940 1769 }
bc3b9346
MK
1770
1771 return false;
75714940 1772}
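
/*
 * Illustrative sketch, not part of the original file: a periodic caller
 * (e.g. a hangcheck-style worker) re-arming the one-shot unclaimed-register
 * reporting and merely logging when the function indicates that an
 * unclaimed access was seen since the last check.
 */
static void __maybe_unused
example_poll_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_DEBUG_DRIVER("unclaimed mmio access detected, debug reporting armed\n");
}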
3756685a
TU
1773
1774static enum forcewake_domains
1775intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1776 i915_reg_t reg)
1777{
895833bd 1778 u32 offset = i915_mmio_reg_offset(reg);
3756685a
TU
1779 enum forcewake_domains fw_domains;
1780
895833bd
TU
1781 if (HAS_FWTABLE(dev_priv)) {
1782 fw_domains = __fwtable_reg_read_fw_domains(offset);
1783 } else if (INTEL_GEN(dev_priv) >= 6) {
1784 fw_domains = __gen6_reg_read_fw_domains(offset);
1785 } else {
1786 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1787 fw_domains = 0;
3756685a
TU
1788 }
1789
1790 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1791
1792 return fw_domains;
1793}
1794
1795static enum forcewake_domains
1796intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1797 i915_reg_t reg)
1798{
22d48c55 1799 u32 offset = i915_mmio_reg_offset(reg);
3756685a
TU
1800 enum forcewake_domains fw_domains;
1801
22d48c55
TU
1802 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1803 fw_domains = __fwtable_reg_write_fw_domains(offset);
1804 } else if (IS_GEN8(dev_priv)) {
1805 fw_domains = __gen8_reg_write_fw_domains(offset);
1806 } else if (IS_GEN(dev_priv, 6, 7)) {
3756685a 1807 fw_domains = FORCEWAKE_RENDER;
22d48c55
TU
1808 } else {
1809 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1810 fw_domains = 0;
3756685a
TU
1811 }
1812
1813 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1814
1815 return fw_domains;
1816}
1817
1818/**
1819 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1820 * a register
1821 * @dev_priv: pointer to struct drm_i915_private
1822 * @reg: register in question
1823 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1824 *
1825 * Returns the set of forcewake domains that must be taken (e.g. with
1826 * intel_uncore_forcewake_get()) for the specified register to be accessible
1827 * in the specified mode (read, write or read/write) with raw mmio accessors.
1828 *
1829 * NOTE: On Gen6 and Gen7, the write forcewake domain (FORCEWAKE_RENDER) requires
1830 * callers to do FIFO management on their own or risk losing writes.
1831 */
1832enum forcewake_domains
1833intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1834 i915_reg_t reg, unsigned int op)
1835{
1836 enum forcewake_domains fw_domains = 0;
1837
1838 WARN_ON(!op);
1839
895833bd
TU
1840 if (intel_vgpu_active(dev_priv))
1841 return 0;
1842
3756685a
TU
1843 if (op & FW_REG_READ)
1844 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1845
1846 if (op & FW_REG_WRITE)
1847 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1848
1849 return fw_domains;
1850}
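
/*
 * Illustrative sketch, not part of the original file: a raw register read
 * performed under exactly the forcewake domains the register needs,
 * mirroring the pattern intel_wait_for_register() uses above.
 */
static u32 __maybe_unused example_raw_read(struct drm_i915_private *dev_priv,
					   i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	intel_uncore_forcewake_get(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put(dev_priv, fw);

	return val;
}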