drm/i915: Use correct fw_domains during reset
drivers/gpu/drm/i915/intel_uncore.c (linux-2.6-block.git)
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
cf9d2890 26#include "i915_vgpu.h"
907b28c5 27
264ec1a8 28#include <asm/iosf_mbi.h>
29#include <linux/pm_runtime.h>
30
83e33372 31#define FORCEWAKE_ACK_TIMEOUT_MS 50
907b28c5 32
75aa3f63 33#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
6af5d92f 34
35static const char * const forcewake_domain_names[] = {
36 "render",
37 "blitter",
38 "media",
39};
40
41const char *
48c1026a 42intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 43{
53abb679 44 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
45
46 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
47 return forcewake_domain_names[id];
48
49 WARN_ON(id);
50
51 return "unknown";
52}
53
05a2fb15 54static inline void
55fw_domain_reset(struct drm_i915_private *i915,
56 const struct intel_uncore_forcewake_domain *d)
907b28c5 57{
f0f59a00 58 WARN_ON(!i915_mmio_reg_valid(d->reg_set));
577ac4bd 59 __raw_i915_write32(i915, d->reg_set, d->val_reset);
60}
61
62static inline void
63fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
907b28c5 64{
65 d->wake_count++;
66 hrtimer_start_range_ns(&d->timer,
8b0e1953 67 NSEC_PER_MSEC,
68 NSEC_PER_MSEC,
69 HRTIMER_MODE_REL);
70}
71
05a2fb15 72static inline void
73fw_domain_wait_ack_clear(struct drm_i915_private *i915,
74 const struct intel_uncore_forcewake_domain *d)
907b28c5 75{
577ac4bd 76 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
05a2fb15 77 FORCEWAKE_KERNEL) == 0,
907b28c5 78 FORCEWAKE_ACK_TIMEOUT_MS))
79 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
80 intel_uncore_forcewake_domain_to_str(d->id));
81}
907b28c5 82
05a2fb15 83static inline void
84fw_domain_get(struct drm_i915_private *i915,
85 const struct intel_uncore_forcewake_domain *d)
05a2fb15 86{
577ac4bd 87 __raw_i915_write32(i915, d->reg_set, d->val_set);
05a2fb15 88}
907b28c5 89
05a2fb15 90static inline void
91fw_domain_wait_ack(struct drm_i915_private *i915,
92 const struct intel_uncore_forcewake_domain *d)
05a2fb15 93{
577ac4bd 94 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
05a2fb15 95 FORCEWAKE_KERNEL),
907b28c5 96 FORCEWAKE_ACK_TIMEOUT_MS))
97 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
98 intel_uncore_forcewake_domain_to_str(d->id));
99}
907b28c5 100
05a2fb15 101static inline void
102fw_domain_put(struct drm_i915_private *i915,
103 const struct intel_uncore_forcewake_domain *d)
05a2fb15 104{
577ac4bd 105 __raw_i915_write32(i915, d->reg_set, d->val_clear);
106}
107
05a2fb15 108static inline void
109fw_domain_posting_read(struct drm_i915_private *i915,
110 const struct intel_uncore_forcewake_domain *d)
907b28c5 111{
05a2fb15 112 /* something from same cacheline, but not from the set register */
f0f59a00 113 if (i915_mmio_reg_valid(d->reg_post))
577ac4bd 114 __raw_posting_read(i915, d->reg_post);
115}
116
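/*
 * Note on fw_domains_get() below: for each requested domain we first wait
 * for the ack bit to read back as clear (no earlier release still in
 * flight) and then post the "set" request; only after all requests have
 * been written do we wait for the acks, so the wakeups of the individual
 * domains overlap rather than being serialised.
 */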
05a2fb15 117static void
577ac4bd 118fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
907b28c5 119{
05a2fb15 120 struct intel_uncore_forcewake_domain *d;
907b28c5 121
122 for_each_fw_domain_masked(d, fw_domains, i915) {
123 fw_domain_wait_ack_clear(i915, d);
124 fw_domain_get(i915, d);
05a2fb15 125 }
4e1176dd 126
127 for_each_fw_domain_masked(d, fw_domains, i915)
128 fw_domain_wait_ack(i915, d);
b8473050 129
577ac4bd 130 i915->uncore.fw_domains_active |= fw_domains;
05a2fb15 131}
907b28c5 132
05a2fb15 133static void
577ac4bd 134fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
135{
136 struct intel_uncore_forcewake_domain *d;
907b28c5 137
138 for_each_fw_domain_masked(d, fw_domains, i915) {
139 fw_domain_put(i915, d);
140 fw_domain_posting_read(i915, d);
05a2fb15 141 }
b8473050 142
577ac4bd 143 i915->uncore.fw_domains_active &= ~fw_domains;
05a2fb15 144}
907b28c5 145
05a2fb15 146static void
577ac4bd 147fw_domains_posting_read(struct drm_i915_private *i915)
148{
149 struct intel_uncore_forcewake_domain *d;
150
151 /* No need to do this for every domain, the first one found will do */
152 for_each_fw_domain(d, i915) {
153 fw_domain_posting_read(i915, d);
154 break;
155 }
156}
157
158static void
159fw_domains_reset(struct drm_i915_private *i915,
160 enum forcewake_domains fw_domains)
161{
162 struct intel_uncore_forcewake_domain *d;
05a2fb15 163
577ac4bd 164 if (i915->uncore.fw_domains == 0)
3225b2f9 165 return;
f9b3927a 166
167 for_each_fw_domain_masked(d, fw_domains, i915)
168 fw_domain_reset(i915, d);
05a2fb15 169
577ac4bd 170 fw_domains_posting_read(i915);
171}
172
173static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
174{
175 /* w/a for a sporadic read returning 0 by waiting for the GT
176 * thread to wake up.
177 */
178 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
179 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
180 DRM_ERROR("GT thread status wait timed out\n");
181}
182
183static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
48c1026a 184 enum forcewake_domains fw_domains)
185{
186 fw_domains_get(dev_priv, fw_domains);
907b28c5 187
05a2fb15 188 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
c549f738 189 __gen6_gt_wait_for_thread_c0(dev_priv);
190}
191
192static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
193{
194 u32 gtfifodbg;
195
196 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
197 if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
198 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
199}
200
05a2fb15 201static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
48c1026a 202 enum forcewake_domains fw_domains)
907b28c5 203{
05a2fb15 204 fw_domains_put(dev_priv, fw_domains);
205 gen6_gt_check_fifodbg(dev_priv);
206}
207
208static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
209{
210 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
211
212 return count & GT_FIFO_FREE_ENTRIES_MASK;
213}
214
215static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
216{
217 int ret = 0;
218
219 /* On VLV, FIFO will be shared by both SW and HW.
220 * So, we need to read the FREE_ENTRIES every time. */
2d1fe073 221 if (IS_VALLEYVIEW(dev_priv))
c32e3788 222 dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
5135d64b 223
224 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
225 int loop = 500;
226 u32 fifo = fifo_free_entries(dev_priv);
227
228 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
229 udelay(10);
c32e3788 230 fifo = fifo_free_entries(dev_priv);
231 }
232 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
233 ++ret;
234 dev_priv->uncore.fifo_count = fifo;
235 }
236 dev_priv->uncore.fifo_count--;
237
238 return ret;
239}
240
241static enum hrtimer_restart
242intel_uncore_fw_release_timer(struct hrtimer *timer)
38cff0b1 243{
244 struct intel_uncore_forcewake_domain *domain =
245 container_of(timer, struct intel_uncore_forcewake_domain, timer);
246 struct drm_i915_private *dev_priv =
247 container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
b2cff0db 248 unsigned long irqflags;
38cff0b1 249
003342a5 250 assert_rpm_device_not_suspended(dev_priv);
38cff0b1 251
003342a5 252 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
253 if (WARN_ON(domain->wake_count == 0))
254 domain->wake_count++;
255
b8473050 256 if (--domain->wake_count == 0)
003342a5 257 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
b2cff0db 258
003342a5 259 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
260
261 return HRTIMER_NORESTART;
262}
263
264static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
265 bool restore)
38cff0b1 266{
48c1026a 267 unsigned long irqflags;
b2cff0db 268 struct intel_uncore_forcewake_domain *domain;
48c1026a 269 int retry_count = 100;
003342a5 270 enum forcewake_domains fw, active_domains;
38cff0b1 271
272 /* Hold uncore.lock across reset to prevent any register access
273 * with forcewake not set correctly. Wait until all pending
274 * timers are run before holding.
275 */
276 while (1) {
277 active_domains = 0;
38cff0b1 278
33c582c1 279 for_each_fw_domain(domain, dev_priv) {
a57a4a67 280 if (hrtimer_cancel(&domain->timer) == 0)
b2cff0db 281 continue;
38cff0b1 282
a57a4a67 283 intel_uncore_fw_release_timer(&domain->timer);
b2cff0db 284 }
aec347ab 285
b2cff0db 286 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
b2ec142c 287
33c582c1 288 for_each_fw_domain(domain, dev_priv) {
a57a4a67 289 if (hrtimer_active(&domain->timer))
33c582c1 290 active_domains |= domain->mask;
b2cff0db 291 }
3123fcaf 292
293 if (active_domains == 0)
294 break;
aec347ab 295
296 if (--retry_count == 0) {
297 DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
298 break;
299 }
0294ae7b 300
301 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
302 cond_resched();
303 }
0294ae7b 304
305 WARN_ON(active_domains);
306
003342a5 307 fw = dev_priv->uncore.fw_domains_active;
308 if (fw)
309 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
ef46e0d2 310
cb3600db 311 fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
38cff0b1 312
0294ae7b 313 if (restore) { /* If reset with a user forcewake, try to restore */
314 if (fw)
315 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
316
dc97997a 317 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
0294ae7b 318 dev_priv->uncore.fifo_count =
c32e3788 319 fifo_free_entries(dev_priv);
320 }
321
b2cff0db 322 if (!restore)
59bad947 323 assert_forcewakes_inactive(dev_priv);
b2cff0db 324
0294ae7b 325 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
326}
327
328static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
329{
330 const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
331 const unsigned int sets[4] = { 1, 1, 2, 2 };
332 const u32 cap = dev_priv->edram_cap;
333
334 return EDRAM_NUM_BANKS(cap) *
335 ways[EDRAM_WAYS_IDX(cap)] *
336 sets[EDRAM_SETS_IDX(cap)] *
337 1024 * 1024;
338}
339
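/*
 * Worked example (hypothetical capability value): a cap encoding 2 banks,
 * a ways index of 1 (8 ways) and a sets index of 2 (2 sets) gives
 * 2 * 8 * 2 * 1024 * 1024 bytes, i.e. 32MB of eDRAM.
 */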
3accaf7e 340u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
907b28c5 341{
342 if (!HAS_EDRAM(dev_priv))
343 return 0;
344
345 /* The needed capability bits for size calculation
346 * are not there with pre-gen9 hardware, so return 128MB always.
3accaf7e 347 */
348 if (INTEL_GEN(dev_priv) < 9)
349 return 128 * 1024 * 1024;
3accaf7e 350
c02e85a0 351 return gen9_edram_size(dev_priv);
3accaf7e 352}
907b28c5 353
354static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
355{
356 if (IS_HASWELL(dev_priv) ||
357 IS_BROADWELL(dev_priv) ||
358 INTEL_GEN(dev_priv) >= 9) {
359 dev_priv->edram_cap = __raw_i915_read32(dev_priv,
360 HSW_EDRAM_CAP);
361
362 /* NB: We can't write IDICR yet because we do not have gt funcs
18ce3994 363 * set up */
364 } else {
365 dev_priv->edram_cap = 0;
18ce3994 366 }
367
368 if (HAS_EDRAM(dev_priv))
369 DRM_INFO("Found %lluMB of eDRAM\n",
370 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
371}
372
8a47eb19 373static bool
8ac3e1bb 374fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
375{
376 u32 dbg;
377
378 dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
379 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
380 return false;
381
382 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
383
384 return true;
385}
386
387static bool
388vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
389{
390 u32 cer;
391
392 cer = __raw_i915_read32(dev_priv, CLAIM_ER);
393 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
394 return false;
395
396 __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
397
398 return true;
399}
400
401static bool
402check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
403{
404 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
405 return fpga_check_for_unclaimed_mmio(dev_priv);
406
407 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
408 return vlv_check_for_unclaimed_mmio(dev_priv);
409
410 return false;
411}
412
dc97997a 413static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
414 bool restore_forcewake)
415{
416 struct intel_device_info *info = mkwrite_device_info(dev_priv);
417
418 /* clear out unclaimed reg detection bit */
419 if (check_for_unclaimed_mmio(dev_priv))
420 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
907b28c5 421
97058870 422 /* clear out old GT FIFO errors */
dc97997a 423 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
424 __raw_i915_write32(dev_priv, GTFIFODBG,
425 __raw_i915_read32(dev_priv, GTFIFODBG));
426
a04f90a3 427 /* WaDisableShadowRegForCpd:chv */
dc97997a 428 if (IS_CHERRYVIEW(dev_priv)) {
429 __raw_i915_write32(dev_priv, GTFIFOCTL,
430 __raw_i915_read32(dev_priv, GTFIFOCTL) |
431 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
432 GT_FIFO_CTL_RC6_POLICY_STALL);
433 }
434
a3f79ca6 435 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
436 info->has_decoupled_mmio = false;
437
dc97997a 438 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
439}
440
68f60946 441void intel_uncore_suspend(struct drm_i915_private *dev_priv)
ed493883 442{
443 iosf_mbi_unregister_pmic_bus_access_notifier(
444 &dev_priv->uncore.pmic_bus_access_nb);
445 intel_uncore_forcewake_reset(dev_priv, false);
446}
447
448void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
449{
450 __intel_uncore_early_sanitize(dev_priv, true);
451 iosf_mbi_register_pmic_bus_access_notifier(
452 &dev_priv->uncore.pmic_bus_access_nb);
dc97997a 453 i915_check_and_clear_faults(dev_priv);
454}
455
dc97997a 456void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
521198a2 457{
dc97997a 458 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
274008e8 459
907b28c5 460 /* BIOS often leaves RC6 enabled, but disable it for hw init */
54b4f68f 461 intel_sanitize_gt_powersave(dev_priv);
462}
463
464static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
465 enum forcewake_domains fw_domains)
466{
467 struct intel_uncore_forcewake_domain *domain;
a6111f7b 468
469 fw_domains &= dev_priv->uncore.fw_domains;
470
33c582c1 471 for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
a6111f7b 472 if (domain->wake_count++)
33c582c1 473 fw_domains &= ~domain->mask;
474 }
475
b8473050 476 if (fw_domains)
477 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
478}
479
480/**
481 * intel_uncore_forcewake_get - grab forcewake domain references
482 * @dev_priv: i915 device instance
483 * @fw_domains: forcewake domains to get reference on
484 *
485 * This function can be used to get GT's forcewake domain references.
486 * Normal register access will handle the forcewake domains automatically.
487 * However, if some sequence requires the GT not to power down particular
488 * forcewake domains, this function should be called at the beginning of the
489 * sequence, and the reference should subsequently be dropped by a symmetric
490 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
491 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
907b28c5 492 */
59bad947 493void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
48c1026a 494 enum forcewake_domains fw_domains)
495{
496 unsigned long irqflags;
497
498 if (!dev_priv->uncore.funcs.force_wake_get)
499 return;
500
c9b8846a 501 assert_rpm_wakelock_held(dev_priv);
c8c8fb33 502
6daccb0b 503 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
a6111f7b 504 __intel_uncore_forcewake_get(dev_priv, fw_domains);
505 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
506}
507
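/*
 * Illustrative usage sketch (not taken from this file; reg_a/reg_b and the
 * values are placeholders): keep the render well awake across a raw
 * register sequence that must not race with power gating:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	I915_WRITE_FW(reg_a, val_a);
 *	I915_WRITE_FW(reg_b, val_b);
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */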
59bad947 508/**
a6111f7b 509 * intel_uncore_forcewake_get__locked - grab forcewake domain references
59bad947 510 * @dev_priv: i915 device instance
a6111f7b 511 * @fw_domains: forcewake domains to get reference on
59bad947 512 *
513 * See intel_uncore_forcewake_get(). This variant places the onus
514 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 515 */
516void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
517 enum forcewake_domains fw_domains)
518{
67520415 519 lockdep_assert_held(&dev_priv->uncore.lock);
520
521 if (!dev_priv->uncore.funcs.force_wake_get)
522 return;
523
524 __intel_uncore_forcewake_get(dev_priv, fw_domains);
525}
526
527static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
528 enum forcewake_domains fw_domains)
907b28c5 529{
b2cff0db 530 struct intel_uncore_forcewake_domain *domain;
907b28c5 531
532 fw_domains &= dev_priv->uncore.fw_domains;
533
33c582c1 534 for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
535 if (WARN_ON(domain->wake_count == 0))
536 continue;
537
538 if (--domain->wake_count)
539 continue;
540
05a2fb15 541 fw_domain_arm_timer(domain);
aec347ab 542 }
a6111f7b 543}
dc9fb09c 544
545/**
546 * intel_uncore_forcewake_put - release a forcewake domain reference
547 * @dev_priv: i915 device instance
548 * @fw_domains: forcewake domains to put references
549 *
550 * This function drops the device-level forcewakes for specified
551 * domains obtained by intel_uncore_forcewake_get().
552 */
553void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
554 enum forcewake_domains fw_domains)
555{
556 unsigned long irqflags;
557
558 if (!dev_priv->uncore.funcs.force_wake_put)
559 return;
560
561 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
562 __intel_uncore_forcewake_put(dev_priv, fw_domains);
563 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
564}
565
566/**
567 * intel_uncore_forcewake_put__locked - release forcewake domain references
568 * @dev_priv: i915 device instance
569 * @fw_domains: forcewake domains to put references on
570 *
571 * See intel_uncore_forcewake_put(). This variant places the onus
572 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
573 */
574void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
575 enum forcewake_domains fw_domains)
576{
67520415 577 lockdep_assert_held(&dev_priv->uncore.lock);
578
579 if (!dev_priv->uncore.funcs.force_wake_put)
580 return;
581
582 __intel_uncore_forcewake_put(dev_priv, fw_domains);
583}
584
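/*
 * Illustrative sketch of the __locked variants: the caller owns
 * dev_priv->uncore.lock for the whole critical section:
 *
 *	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	... raw register accesses ...
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 */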
59bad947 585void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
586{
587 if (!dev_priv->uncore.funcs.force_wake_get)
588 return;
589
003342a5 590 WARN_ON(dev_priv->uncore.fw_domains_active);
591}
592
907b28c5 593/* We give fast paths for the really cool registers */
40181697 594#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
907b28c5 595
596#define __gen6_reg_read_fw_domains(offset) \
597({ \
598 enum forcewake_domains __fwd; \
599 if (NEEDS_FORCE_WAKE(offset)) \
600 __fwd = FORCEWAKE_RENDER; \
601 else \
602 __fwd = 0; \
603 __fwd; \
604})
605
9480dbf0 606static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 607{
608 if (offset < entry->start)
609 return -1;
610 else if (offset > entry->end)
611 return 1;
612 else
613 return 0;
614}
615
616/* Copied and "macroized" from lib/bsearch.c */
617#define BSEARCH(key, base, num, cmp) ({ \
618 unsigned int start__ = 0, end__ = (num); \
619 typeof(base) result__ = NULL; \
620 while (start__ < end__) { \
621 unsigned int mid__ = start__ + (end__ - start__) / 2; \
622 int ret__ = (cmp)((key), (base) + mid__); \
623 if (ret__ < 0) { \
624 end__ = mid__; \
625 } else if (ret__ > 0) { \
626 start__ = mid__ + 1; \
627 } else { \
628 result__ = (base) + mid__; \
629 break; \
630 } \
631 } \
632 result__; \
633})
634
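/*
 * BSEARCH() is used with fw_range_cmp() below to look up a register offset
 * in the sorted forcewake range tables, and with mmio_reg_cmp() to look up
 * an offset in the sorted gen8 shadowed-register list.
 */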
9fc1117c 635static enum forcewake_domains
15157970 636find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
9fc1117c 637{
9480dbf0 638 const struct intel_forcewake_range *entry;
9fc1117c 639
640 entry = BSEARCH(offset,
641 dev_priv->uncore.fw_domains_table,
642 dev_priv->uncore.fw_domains_table_entries,
91e630b9 643 fw_range_cmp);
38fb6a40 644
645 if (!entry)
646 return 0;
647
648 WARN(entry->domains & ~dev_priv->uncore.fw_domains,
649 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
650 entry->domains & ~dev_priv->uncore.fw_domains, offset);
651
652 return entry->domains;
653}
654
655#define GEN_FW_RANGE(s, e, d) \
656 { .start = (s), .end = (e), .domains = (d) }
1938e59a 657
658#define HAS_FWTABLE(dev_priv) \
659 (IS_GEN9(dev_priv) || \
660 IS_CHERRYVIEW(dev_priv) || \
661 IS_VALLEYVIEW(dev_priv))
662
b0081239 663/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
664static const struct intel_forcewake_range __vlv_fw_ranges[] = {
665 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
666 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
667 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
668 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
669 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
b0081239 670 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
671 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
672};
1938e59a 673
895833bd 674#define __fwtable_reg_read_fw_domains(offset) \
675({ \
676 enum forcewake_domains __fwd = 0; \
0dd356bb 677 if (NEEDS_FORCE_WAKE((offset))) \
15157970 678 __fwd = find_fw_domain(dev_priv, offset); \
679 __fwd; \
680})
681
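/*
 * Example: on VLV (which uses the __vlv_fw_ranges table above), a read at
 * offset 0x12080 falls in the 0x12000-0x13fff range, so FORCEWAKE_MEDIA is
 * taken before the MMIO access; an offset outside every range takes no
 * forcewake at all.
 */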
47188574 682/* *Must* be sorted by offset! See intel_shadow_table_check(). */
6863b76c 683static const i915_reg_t gen8_shadowed_regs[] = {
684 RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
685 GEN6_RPNSWREQ, /* 0xA008 */
686 GEN6_RC_VIDEO_FREQ, /* 0xA00C */
687 RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
688 RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
689 RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
690 /* TODO: Other registers are not yet used */
691};
692
9480dbf0 693static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
5a659383 694{
9480dbf0 695 u32 offset = i915_mmio_reg_offset(*reg);
5a659383 696
9480dbf0 697 if (key < offset)
5a659383 698 return -1;
9480dbf0 699 else if (key > offset)
700 return 1;
701 else
702 return 0;
703}
704
705static bool is_gen8_shadowed(u32 offset)
706{
9480dbf0 707 const i915_reg_t *regs = gen8_shadowed_regs;
5a659383 708
709 return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
710 mmio_reg_cmp);
711}
712
713#define __gen8_reg_write_fw_domains(offset) \
714({ \
715 enum forcewake_domains __fwd; \
716 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
717 __fwd = FORCEWAKE_RENDER; \
718 else \
719 __fwd = 0; \
720 __fwd; \
721})
722
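/*
 * Example: a write to RING_TAIL(RENDER_RING_BASE) (offset 0x2030) hits the
 * shadowed list above, so __gen8_reg_write_fw_domains() evaluates to 0 and
 * no forcewake is taken; a non-shadowed register below 0x40000 resolves to
 * FORCEWAKE_RENDER.
 */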
b0081239 723/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
724static const struct intel_forcewake_range __chv_fw_ranges[] = {
725 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 726 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 727 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 728 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 729 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 730 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 731 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
732 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
733 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 734 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
735 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
736 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
737 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
738 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
739 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
740 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 741};
38fb6a40 742
22d48c55 743#define __fwtable_reg_write_fw_domains(offset) \
744({ \
745 enum forcewake_domains __fwd = 0; \
0dd356bb 746 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
15157970 747 __fwd = find_fw_domain(dev_priv, offset); \
748 __fwd; \
749})
750
b0081239 751/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c 752static const struct intel_forcewake_range __gen9_fw_ranges[] = {
0dd356bb 753 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
754 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
755 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
0dd356bb 756 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
9fc1117c 757 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
0dd356bb 758 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
9fc1117c 759 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
0dd356bb 760 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
b0081239 761 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 762 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
0dd356bb 763 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
9fc1117c 764 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
0dd356bb 765 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
b0081239 766 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
0dd356bb 767 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
9fc1117c 768 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
0dd356bb 769 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
9fc1117c 770 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
0dd356bb 771 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
b0081239 772 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
78424c92 773 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
9fc1117c 774 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
0dd356bb 775 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
b0081239 776 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
0dd356bb 777 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
9fc1117c 778 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
0dd356bb 779 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
9fc1117c 780 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
0dd356bb 781 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
b0081239 782 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
0dd356bb 783 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
784 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
785};
6863b76c 786
787static void
788ilk_dummy_write(struct drm_i915_private *dev_priv)
789{
790 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
791 * the chip from rc6 before touching it for real. MI_MODE is masked,
792 * hence harmless to write 0 into. */
6af5d92f 793 __raw_i915_write32(dev_priv, MI_MODE, 0);
794}
795
796static void
797__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
798 const i915_reg_t reg,
799 const bool read,
800 const bool before)
907b28c5 801{
802 if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
803 "Unclaimed %s register 0x%x\n",
804 read ? "read from" : "write to",
4bd0a25d 805 i915_mmio_reg_offset(reg)))
48572edd 806 i915.mmio_debug--; /* Only report the first N failures */
807}
808
809static inline void
810unclaimed_reg_debug(struct drm_i915_private *dev_priv,
811 const i915_reg_t reg,
812 const bool read,
813 const bool before)
814{
815 if (likely(!i915.mmio_debug))
816 return;
817
818 __unclaimed_reg_debug(dev_priv, reg, read, before);
819}
820
821static const enum decoupled_power_domain fw2dpd_domain[] = {
822 GEN9_DECOUPLED_PD_RENDER,
823 GEN9_DECOUPLED_PD_BLITTER,
824 GEN9_DECOUPLED_PD_ALL,
825 GEN9_DECOUPLED_PD_MEDIA,
826 GEN9_DECOUPLED_PD_ALL,
827 GEN9_DECOUPLED_PD_ALL,
828 GEN9_DECOUPLED_PD_ALL
829};
830
831/*
832 * Decoupled MMIO access for only 1 DWORD
833 */
834static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
835 u32 reg,
836 enum forcewake_domains fw_domain,
837 enum decoupled_ops operation)
838{
839 enum decoupled_power_domain dp_domain;
840 u32 ctrl_reg_data = 0;
841
842 dp_domain = fw2dpd_domain[fw_domain - 1];
843
844 ctrl_reg_data |= reg;
845 ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
846 ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
847 ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
848 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);
849
850 if (wait_for_atomic((__raw_i915_read32(dev_priv,
851 GEN9_DECOUPLED_REG0_DW1) &
852 GEN9_DECOUPLED_DW1_GO) == 0,
853 FORCEWAKE_ACK_TIMEOUT_MS))
854 DRM_ERROR("Decoupled MMIO wait timed out\n");
855}
856
857static inline u32
858__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
859 u32 reg,
860 enum forcewake_domains fw_domain)
861{
862 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
863 GEN9_DECOUPLED_OP_READ);
864
865 return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
866}
867
868static inline void
869__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
870 u32 reg, u32 data,
871 enum forcewake_domains fw_domain)
872{
873
874 __raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);
875
876 __gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
877 GEN9_DECOUPLED_OP_WRITE);
878}
879
880
51f67885 881#define GEN2_READ_HEADER(x) \
5d738795 882 u##x val = 0; \
da5827c3 883 assert_rpm_wakelock_held(dev_priv);
5d738795 884
51f67885 885#define GEN2_READ_FOOTER \
886 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
887 return val
888
51f67885 889#define __gen2_read(x) \
0b274481 890static u##x \
f0f59a00 891gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 892 GEN2_READ_HEADER(x); \
3967018e 893 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 894 GEN2_READ_FOOTER; \
895}
896
897#define __gen5_read(x) \
898static u##x \
f0f59a00 899gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
51f67885 900 GEN2_READ_HEADER(x); \
901 ilk_dummy_write(dev_priv); \
902 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 903 GEN2_READ_FOOTER; \
904}
905
906__gen5_read(8)
907__gen5_read(16)
908__gen5_read(32)
909__gen5_read(64)
910__gen2_read(8)
911__gen2_read(16)
912__gen2_read(32)
913__gen2_read(64)
914
915#undef __gen5_read
916#undef __gen2_read
917
918#undef GEN2_READ_FOOTER
919#undef GEN2_READ_HEADER
920
921#define GEN6_READ_HEADER(x) \
f0f59a00 922 u32 offset = i915_mmio_reg_offset(reg); \
923 unsigned long irqflags; \
924 u##x val = 0; \
da5827c3 925 assert_rpm_wakelock_held(dev_priv); \
926 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
927 unclaimed_reg_debug(dev_priv, reg, true, true)
928
929#define GEN6_READ_FOOTER \
9c053501 930 unclaimed_reg_debug(dev_priv, reg, true, false); \
931 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
932 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
933 return val
934
935static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
936 enum forcewake_domains fw_domains)
937{
938 struct intel_uncore_forcewake_domain *domain;
b2cff0db 939
940 for_each_fw_domain_masked(domain, fw_domains, dev_priv)
941 fw_domain_arm_timer(domain);
942
943 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
944}
945
946static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
947 enum forcewake_domains fw_domains)
948{
949 if (WARN_ON(!fw_domains))
950 return;
951
952 /* Turn on all requested but inactive supported forcewake domains. */
953 fw_domains &= dev_priv->uncore.fw_domains;
954 fw_domains &= ~dev_priv->uncore.fw_domains_active;
b2cff0db 955
956 if (fw_domains)
957 ___force_wake_auto(dev_priv, fw_domains);
958}
959
ccfceda2 960#define __gen_read(func, x) \
3967018e 961static u##x \
ccfceda2 962func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6863b76c 963 enum forcewake_domains fw_engine; \
51f67885 964 GEN6_READ_HEADER(x); \
ccfceda2 965 fw_engine = __##func##_reg_read_fw_domains(offset); \
6a42d0f4 966 if (fw_engine) \
b208ba8e 967 __force_wake_auto(dev_priv, fw_engine); \
6fe72865 968 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 969 GEN6_READ_FOOTER; \
940aece4 970}
971#define __gen6_read(x) __gen_read(gen6, x)
972#define __fwtable_read(x) __gen_read(fwtable, x)
940aece4 973
974#define __gen9_decoupled_read(x) \
975static u##x \
976gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
977 i915_reg_t reg, bool trace) { \
978 enum forcewake_domains fw_engine; \
979 GEN6_READ_HEADER(x); \
980 fw_engine = __fwtable_reg_read_fw_domains(offset); \
981 if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
982 unsigned i; \
983 u32 *ptr_data = (u32 *) &val; \
984 for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
985 *ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
986 offset, \
987 fw_engine); \
988 } else { \
989 val = __raw_i915_read##x(dev_priv, reg); \
990 } \
991 GEN6_READ_FOOTER; \
992}
993
994__gen9_decoupled_read(32)
995__gen9_decoupled_read(64)
996__fwtable_read(8)
997__fwtable_read(16)
998__fwtable_read(32)
999__fwtable_read(64)
1000__gen6_read(8)
1001__gen6_read(16)
1002__gen6_read(32)
1003__gen6_read(64)
3967018e 1004
6044c4a3 1005#undef __fwtable_read
3967018e 1006#undef __gen6_read
1007#undef GEN6_READ_FOOTER
1008#undef GEN6_READ_HEADER
5d738795 1009
51f67885 1010#define GEN2_WRITE_HEADER \
5d738795 1011 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1012 assert_rpm_wakelock_held(dev_priv); \
907b28c5 1013
51f67885 1014#define GEN2_WRITE_FOOTER
0d965301 1015
51f67885 1016#define __gen2_write(x) \
0b274481 1017static void \
f0f59a00 1018gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1019 GEN2_WRITE_HEADER; \
4032ef43 1020 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1021 GEN2_WRITE_FOOTER; \
1022}
1023
1024#define __gen5_write(x) \
1025static void \
f0f59a00 1026gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1027 GEN2_WRITE_HEADER; \
4032ef43
BW
1028 ilk_dummy_write(dev_priv); \
1029 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1030 GEN2_WRITE_FOOTER; \
1031}
1032
1033__gen5_write(8)
1034__gen5_write(16)
1035__gen5_write(32)
1036__gen2_write(8)
1037__gen2_write(16)
1038__gen2_write(32)
1039
1040#undef __gen5_write
1041#undef __gen2_write
1042
1043#undef GEN2_WRITE_FOOTER
1044#undef GEN2_WRITE_HEADER
1045
1046#define GEN6_WRITE_HEADER \
f0f59a00 1047 u32 offset = i915_mmio_reg_offset(reg); \
1048 unsigned long irqflags; \
1049 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
da5827c3 1050 assert_rpm_wakelock_held(dev_priv); \
1051 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
1052 unclaimed_reg_debug(dev_priv, reg, false, true)
1053
1054#define GEN6_WRITE_FOOTER \
9c053501 1055 unclaimed_reg_debug(dev_priv, reg, false, false); \
1056 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1057
1058#define __gen6_write(x) \
1059static void \
f0f59a00 1060gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
4032ef43 1061 u32 __fifo_ret = 0; \
51f67885 1062 GEN6_WRITE_HEADER; \
0670c5a6 1063 if (NEEDS_FORCE_WAKE(offset)) { \
1064 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1065 } \
1066 __raw_i915_write##x(dev_priv, reg, val); \
1067 if (unlikely(__fifo_ret)) { \
1068 gen6_gt_check_fifodbg(dev_priv); \
1069 } \
51f67885 1070 GEN6_WRITE_FOOTER; \
1071}
1072
ccfceda2 1073#define __gen_write(func, x) \
ab2aa47e 1074static void \
ccfceda2 1075func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 1076 enum forcewake_domains fw_engine; \
51f67885 1077 GEN6_WRITE_HEADER; \
ccfceda2 1078 fw_engine = __##func##_reg_write_fw_domains(offset); \
6a42d0f4 1079 if (fw_engine) \
b208ba8e 1080 __force_wake_auto(dev_priv, fw_engine); \
1938e59a 1081 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 1082 GEN6_WRITE_FOOTER; \
1938e59a 1083}
1084#define __gen8_write(x) __gen_write(gen8, x)
1085#define __fwtable_write(x) __gen_write(fwtable, x)
1938e59a 1086
1087#define __gen9_decoupled_write(x) \
1088static void \
1089gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
1090 i915_reg_t reg, u##x val, \
1091 bool trace) { \
1092 enum forcewake_domains fw_engine; \
1093 GEN6_WRITE_HEADER; \
1094 fw_engine = __fwtable_reg_write_fw_domains(offset); \
1095 if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
1096 __gen9_decoupled_mmio_write(dev_priv, \
1097 offset, \
1098 val, \
1099 fw_engine); \
1100 else \
1101 __raw_i915_write##x(dev_priv, reg, val); \
1102 GEN6_WRITE_FOOTER; \
1103}
1104
1105__gen9_decoupled_write(32)
1106__fwtable_write(8)
1107__fwtable_write(16)
1108__fwtable_write(32)
1109__gen8_write(8)
1110__gen8_write(16)
1111__gen8_write(32)
1112__gen6_write(8)
1113__gen6_write(16)
1114__gen6_write(32)
4032ef43 1115
22d48c55 1116#undef __fwtable_write
ab2aa47e 1117#undef __gen8_write
4032ef43 1118#undef __gen6_write
1119#undef GEN6_WRITE_FOOTER
1120#undef GEN6_WRITE_HEADER
907b28c5 1121
1122#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1123do { \
1124 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1125 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1126 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1127} while (0)
1128
1129#define ASSIGN_READ_MMIO_VFUNCS(x) \
1130do { \
1131 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1132 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1133 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1134 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1135} while (0)
1136
1137
1138static void fw_domain_init(struct drm_i915_private *dev_priv,
48c1026a 1139 enum forcewake_domain_id domain_id,
1140 i915_reg_t reg_set,
1141 i915_reg_t reg_ack)
1142{
1143 struct intel_uncore_forcewake_domain *d;
1144
1145 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1146 return;
1147
1148 d = &dev_priv->uncore.fw_domain[domain_id];
1149
1150 WARN_ON(d->wake_count);
1151
1152 d->wake_count = 0;
1153 d->reg_set = reg_set;
1154 d->reg_ack = reg_ack;
1155
1156 if (IS_GEN6(dev_priv)) {
1157 d->val_reset = 0;
1158 d->val_set = FORCEWAKE_KERNEL;
1159 d->val_clear = 0;
1160 } else {
8543747c 1161 /* WaRsClearFWBitsAtReset:bdw,skl */
1162 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1163 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1164 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1165 }
1166
666a4537 1167 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1168 d->reg_post = FORCEWAKE_ACK_VLV;
1169 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1170 d->reg_post = ECOBUS;
05a2fb15 1171
1172 d->id = domain_id;
1173
1174 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1175 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1176 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1177
1178 d->mask = 1 << domain_id;
1179
1180 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1181 d->timer.function = intel_uncore_fw_release_timer;
1182
1183 dev_priv->uncore.fw_domains |= (1 << domain_id);
f9b3927a 1184
577ac4bd 1185 fw_domain_reset(dev_priv, d);
1186}
1187
dc97997a 1188static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
0b274481 1189{
e3b1895f 1190 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1191 return;
1192
dc97997a 1193 if (IS_GEN9(dev_priv)) {
1194 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1195 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1196 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1197 FORCEWAKE_RENDER_GEN9,
1198 FORCEWAKE_ACK_RENDER_GEN9);
1199 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1200 FORCEWAKE_BLITTER_GEN9,
1201 FORCEWAKE_ACK_BLITTER_GEN9);
1202 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1203 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
dc97997a 1204 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
05a2fb15 1205 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dc97997a 1206 if (!IS_CHERRYVIEW(dev_priv))
1207 dev_priv->uncore.funcs.force_wake_put =
1208 fw_domains_put_with_fifo;
1209 else
1210 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1211 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1212 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1213 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1214 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
dc97997a 1215 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1216 dev_priv->uncore.funcs.force_wake_get =
1217 fw_domains_get_with_thread_status;
dc97997a 1218 if (IS_HASWELL(dev_priv))
1219 dev_priv->uncore.funcs.force_wake_put =
1220 fw_domains_put_with_fifo;
1221 else
1222 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1223 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1224 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
dc97997a 1225 } else if (IS_IVYBRIDGE(dev_priv)) {
1226 u32 ecobus;
1227
1228 /* IVB configs may use multi-threaded forcewake */
1229
1230 /* A small trick here - if the bios hasn't configured
1231 * MT forcewake, and if the device is in RC6, then
1232 * force_wake_mt_get will not wake the device and the
1233 * ECOBUS read will return zero. Which will be
1234 * (correctly) interpreted by the test below as MT
1235 * forcewake being disabled.
1236 */
1237 dev_priv->uncore.funcs.force_wake_get =
1238 fw_domains_get_with_thread_status;
1239 dev_priv->uncore.funcs.force_wake_put =
1240 fw_domains_put_with_fifo;
1241
1242 /* We need to init first for ECOBUS access and then
1243 * determine later if we want to reinit, in case MT access is
1244 * not working. In this stage we don't know which flavour this
1245 * ivb is, so it is better to reset also the gen6 fw registers
1246 * before the ecobus check.
f9b3927a 1247 */
1248
1249 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1250 __raw_posting_read(dev_priv, ECOBUS);
1251
1252 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1253 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
f9b3927a 1254
556ab7a6 1255 spin_lock_irq(&dev_priv->uncore.lock);
bd527504 1256 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
0b274481 1257 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
bd527504 1258 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
556ab7a6 1259 spin_unlock_irq(&dev_priv->uncore.lock);
0b274481 1260
05a2fb15 1261 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1262 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1263 DRM_INFO("when using vblank-synced partial screen updates.\n");
1264 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1265 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1266 }
dc97997a 1267 } else if (IS_GEN6(dev_priv)) {
0b274481 1268 dev_priv->uncore.funcs.force_wake_get =
05a2fb15 1269 fw_domains_get_with_thread_status;
0b274481 1270 dev_priv->uncore.funcs.force_wake_put =
1271 fw_domains_put_with_fifo;
1272 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1273 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1274 }
1275
1276 /* All future platforms are expected to require complex power gating */
1277 WARN_ON(dev_priv->uncore.fw_domains == 0);
1278}
1279
1280#define ASSIGN_FW_DOMAINS_TABLE(d) \
1281{ \
1282 dev_priv->uncore.fw_domains_table = \
1283 (struct intel_forcewake_range *)(d); \
1284 dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1285}
1286
1287static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1288 unsigned long action, void *data)
1289{
1290 struct drm_i915_private *dev_priv = container_of(nb,
1291 struct drm_i915_private, uncore.pmic_bus_access_nb);
1292
1293 switch (action) {
1294 case MBI_PMIC_BUS_ACCESS_BEGIN:
1295 /*
1296 * forcewake all now to make sure that we don't need to do a
1297 * forcewake later which on systems where this notifier gets
1298 * called requires the punit to access the shared pmic i2c
1299 * bus, which will be busy after this notification, leading to:
1300 * "render: timed out waiting for forcewake ack request."
1301 * errors.
1302 */
1303 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1304 break;
1305 case MBI_PMIC_BUS_ACCESS_END:
1306 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1307 break;
1308 }
1309
1310 return NOTIFY_OK;
1311}
1312
dc97997a 1313void intel_uncore_init(struct drm_i915_private *dev_priv)
f9b3927a 1314{
dc97997a 1315 i915_check_vgpu(dev_priv);
cf9d2890 1316
3accaf7e 1317 intel_uncore_edram_detect(dev_priv);
1318 intel_uncore_fw_domains_init(dev_priv);
1319 __intel_uncore_early_sanitize(dev_priv, false);
0b274481 1320
75714940 1321 dev_priv->uncore.unclaimed_mmio_check = 1;
1322 dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1323 i915_pmic_bus_access_notifier;
75714940 1324
1325 if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1326 ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1327 ASSIGN_READ_MMIO_VFUNCS(gen2);
1328 } else if (IS_GEN5(dev_priv)) {
1329 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1330 ASSIGN_READ_MMIO_VFUNCS(gen5);
1331 } else if (IS_GEN(dev_priv, 6, 7)) {
1332 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1333
1334 if (IS_VALLEYVIEW(dev_priv)) {
1335 ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
1336 ASSIGN_READ_MMIO_VFUNCS(fwtable);
1337 } else {
1338 ASSIGN_READ_MMIO_VFUNCS(gen6);
85ee17eb 1339 }
e3b1895f 1340 } else if (IS_GEN8(dev_priv)) {
dc97997a 1341 if (IS_CHERRYVIEW(dev_priv)) {
15157970 1342 ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
22d48c55 1343 ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
6044c4a3 1344 ASSIGN_READ_MMIO_VFUNCS(fwtable);
1345
1346 } else {
1347 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1348 ASSIGN_READ_MMIO_VFUNCS(gen6);
1938e59a 1349 }
1350 } else {
1351 ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1352 ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
1353 ASSIGN_READ_MMIO_VFUNCS(fwtable);
1354 if (HAS_DECOUPLED_MMIO(dev_priv)) {
1355 dev_priv->uncore.funcs.mmio_readl =
1356 gen9_decoupled_read32;
1357 dev_priv->uncore.funcs.mmio_readq =
1358 gen9_decoupled_read64;
1359 dev_priv->uncore.funcs.mmio_writel =
1360 gen9_decoupled_write32;
940aece4 1361 }
3967018e 1362 }
ed493883 1363
1364 iosf_mbi_register_pmic_bus_access_notifier(
1365 &dev_priv->uncore.pmic_bus_access_nb);
1366
dc97997a 1367 i915_check_and_clear_faults(dev_priv);
0b274481 1368}
1369#undef ASSIGN_WRITE_MMIO_VFUNCS
1370#undef ASSIGN_READ_MMIO_VFUNCS
0b274481 1371
dc97997a 1372void intel_uncore_fini(struct drm_i915_private *dev_priv)
0b274481 1373{
1374 iosf_mbi_unregister_pmic_bus_access_notifier(
1375 &dev_priv->uncore.pmic_bus_access_nb);
1376
0b274481 1377 /* Paranoia: make sure we have disabled everything before we exit. */
1378 intel_uncore_sanitize(dev_priv);
1379 intel_uncore_forcewake_reset(dev_priv, false);
1380}
1381
ae5702d2 1382#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
af76ae44 1383
907b28c5 1384static const struct register_whitelist {
f0f59a00 1385 i915_reg_t offset_ldw, offset_udw;
907b28c5 1386 uint32_t size;
1387 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1388 uint32_t gen_bitmask;
907b28c5 1389} whitelist[] = {
1390 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1391 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1392 .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1393};
1394
1395int i915_reg_read_ioctl(struct drm_device *dev,
1396 void *data, struct drm_file *file)
1397{
fac5e23e 1398 struct drm_i915_private *dev_priv = to_i915(dev);
1399 struct drm_i915_reg_read *reg = data;
1400 struct register_whitelist const *entry = whitelist;
648a9bc5 1401 unsigned size;
f0f59a00 1402 i915_reg_t offset_ldw, offset_udw;
cf67c70f 1403 int i, ret = 0;
1404
1405 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
f0f59a00 1406 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
66478475 1407 (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
907b28c5
CW
1408 break;
1409 }
1410
1411 if (i == ARRAY_SIZE(whitelist))
1412 return -EINVAL;
1413
1414 /* We use the low bits to encode extra flags as the register should
1415 * be naturally aligned (and those that are not so aligned merely
1416 * limit the available flags for that register).
1417 */
1418 offset_ldw = entry->offset_ldw;
1419 offset_udw = entry->offset_udw;
648a9bc5 1420 size = entry->size;
f0f59a00 1421 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
648a9bc5 1422
1423 intel_runtime_pm_get(dev_priv);
1424
1425 switch (size) {
1426 case 8 | 1:
8697600b 1427 reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
648a9bc5 1428 break;
907b28c5 1429 case 8:
8697600b 1430 reg->val = I915_READ64(offset_ldw);
1431 break;
1432 case 4:
8697600b 1433 reg->val = I915_READ(offset_ldw);
1434 break;
1435 case 2:
8697600b 1436 reg->val = I915_READ16(offset_ldw);
1437 break;
1438 case 1:
8697600b 1439 reg->val = I915_READ8(offset_ldw);
1440 break;
1441 default:
1442 ret = -EINVAL;
1443 goto out;
1444 }
1445
1446out:
1447 intel_runtime_pm_put(dev_priv);
1448 return ret;
1449}
1450
dc97997a 1451static int i915_reset_complete(struct pci_dev *pdev)
1452{
1453 u8 gdrst;
dc97997a 1454 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1455 return (gdrst & GRDOM_RESET_STATUS) == 0;
1456}
1457
dc97997a 1458static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
907b28c5 1459{
91c8a326 1460 struct pci_dev *pdev = dev_priv->drm.pdev;
73bbf6bd 1462 /* assert reset for at least 20 usec */
dc97997a 1463 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
73bbf6bd 1464 udelay(20);
dc97997a 1465 pci_write_config_byte(pdev, I915_GDRST, 0);
907b28c5 1466
dc97997a 1467 return wait_for(i915_reset_complete(pdev), 500);
1468}
1469
dc97997a 1470static int g4x_reset_complete(struct pci_dev *pdev)
1471{
1472 u8 gdrst;
dc97997a 1473 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
73bbf6bd 1474 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1475}
1476
dc97997a 1477static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
408d4b9e 1478{
91c8a326 1479 struct pci_dev *pdev = dev_priv->drm.pdev;
1480 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1481 return wait_for(g4x_reset_complete(pdev), 500);
1482}
1483
dc97997a 1484static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
fa4f53c4 1485{
91c8a326 1486 struct pci_dev *pdev = dev_priv->drm.pdev;
1487 int ret;
1488
dc97997a 1489 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1490 GRDOM_RENDER | GRDOM_RESET_ENABLE);
dc97997a 1491 ret = wait_for(g4x_reset_complete(pdev), 500);
1492 if (ret)
1493 return ret;
1494
1495 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1496 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1497 POSTING_READ(VDECCLK_GATE_D);
1498
dc97997a 1499 pci_write_config_byte(pdev, I915_GDRST,
fa4f53c4 1500 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
dc97997a 1501 ret = wait_for(g4x_reset_complete(pdev), 500);
1502 if (ret)
1503 return ret;
1504
1505 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1506 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1507 POSTING_READ(VDECCLK_GATE_D);
1508
dc97997a 1509 pci_write_config_byte(pdev, I915_GDRST, 0);
1510
1511 return 0;
1512}
1513
1514static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1515 unsigned engine_mask)
907b28c5 1516{
1517 int ret;
1518
c039b7f2 1519 I915_WRITE(ILK_GDSR,
0f08ffd6 1520 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1521 ret = intel_wait_for_register(dev_priv,
1522 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1523 500);
1524 if (ret)
1525 return ret;
1526
c039b7f2 1527 I915_WRITE(ILK_GDSR,
0f08ffd6 1528 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1529 ret = intel_wait_for_register(dev_priv,
1530 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1531 500);
1532 if (ret)
1533 return ret;
1534
c039b7f2 1535 I915_WRITE(ILK_GDSR, 0);
1536
1537 return 0;
1538}
1539
1540/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1541static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1542 u32 hw_domain_mask)
907b28c5 1543{
1544 /* GEN6_GDRST is not in the gt power well, no need to check
1545 * for fifo space for the write or forcewake the chip for
1546 * the read
1547 */
ee4b6faf 1548 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
907b28c5 1549
ee4b6faf 1550 /* Spin waiting for the device to ack the reset requests */
1551 return intel_wait_for_register_fw(dev_priv,
1552 GEN6_GDRST, hw_domain_mask, 0,
1553 500);
1554}
1555
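/*
 * Example: gen6_hw_domain_reset(dev_priv, GEN6_GRDOM_RENDER | GEN6_GRDOM_MEDIA)
 * requests a combined render + media reset; gen6_reset_engines() below builds
 * such a mask from the engines it is asked to reset.
 */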
1556/**
1557 * gen6_reset_engines - reset individual engines
dc97997a 1558 * @dev_priv: i915 device
1559 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1560 *
1561 * This function will reset the individual engines that are set in engine_mask.
1562 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1563 *
1564 * Note: It is the responsibility of the caller to handle the difference between
1565 * asking full domain reset versus reset for all available individual engines.
1566 *
1567 * Returns 0 on success, nonzero on error.
1568 */
1569static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1570 unsigned engine_mask)
ee4b6faf 1571{
1572 struct intel_engine_cs *engine;
1573 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1574 [RCS] = GEN6_GRDOM_RENDER,
1575 [BCS] = GEN6_GRDOM_BLT,
1576 [VCS] = GEN6_GRDOM_MEDIA,
1577 [VCS2] = GEN8_GRDOM_MEDIA2,
1578 [VECS] = GEN6_GRDOM_VECS,
1579 };
1580 u32 hw_mask;
1581 int ret;
1582
1583 if (engine_mask == ALL_ENGINES) {
1584 hw_mask = GEN6_GRDOM_FULL;
1585 } else {
bafb0fce
CW
1586 unsigned int tmp;
1587
ee4b6faf 1588 hw_mask = 0;
bafb0fce 1589 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
ee4b6faf
MK
1590 hw_mask |= hw_engine_mask[engine->id];
1591 }
1592
1593 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
907b28c5 1594
dc97997a 1595 intel_uncore_forcewake_reset(dev_priv, true);
5babf0fc 1596
907b28c5
CW
1597 return ret;
1598}
1599
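/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might exercise the per-engine reset path above. ENGINE_MASK() and the
 * RCS/VCS engine ids are assumed from i915_drv.h; the helper name
 * reset_render_and_media() is hypothetical.
 */
static int reset_render_and_media(struct drm_i915_private *dev_priv)
{
	/* Per-engine bits; gen6_reset_engines() maps these to GEN6_GRDOM_*. */
	unsigned engine_mask = ENGINE_MASK(RCS) | ENGINE_MASK(VCS);

	/* intel_gpu_reset() (defined later in this file) wraps the call in
	 * forcewake so the reset request is not dropped by a sleeping well.
	 */
	return intel_gpu_reset(dev_priv, engine_mask);
}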
1758b90e
CW
1600/**
1601 * intel_wait_for_register_fw - wait until register matches expected state
1602 * @dev_priv: the i915 device
1603 * @reg: the register to read
1604 * @mask: mask to apply to register value
1605 * @value: expected value
1606 * @timeout_ms: timeout in milliseconds
1607 *
1608 * This routine waits until the target register @reg contains the expected
3d466cd6
DV
1609 * @value after applying the @mask, i.e. it waits until ::
1610 *
1611 * (I915_READ_FW(reg) & mask) == value
1612 *
1758b90e
CW
1613 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1614 *
1615 * Note that this routine assumes the caller holds forcewake asserted; it is
1616 * not suitable for very long waits. See intel_wait_for_register() if you
1617 * wish to wait without holding forcewake for the duration (i.e. you expect
1618 * the wait to be slow).
1619 *
1620 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1621 */
1622int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1623 i915_reg_t reg,
1624 const u32 mask,
1625 const u32 value,
1626 const unsigned long timeout_ms)
1627{
1628#define done ((I915_READ_FW(reg) & mask) == value)
1629 int ret = wait_for_us(done, 2);
1630 if (ret)
1631 ret = wait_for(done, timeout_ms);
1632 return ret;
1633#undef done
1634}
1635
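/*
 * Illustrative sketch, not part of the original file: using
 * intel_wait_for_register_fw() with forcewake explicitly held, as the
 * kernel-doc above requires. The helper name wait_for_unit_idle() is
 * hypothetical; @reg and @idle_bit stand in for whatever register and
 * ready bit a real caller would poll.
 */
static int wait_for_unit_idle(struct drm_i915_private *dev_priv,
			      i915_reg_t reg, u32 idle_bit)
{
	int err;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	err = intel_wait_for_register_fw(dev_priv, reg, idle_bit, idle_bit, 10);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return err;
}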
1636/**
1637 * intel_wait_for_register - wait until register matches expected state
1638 * @dev_priv: the i915 device
1639 * @reg: the register to read
1640 * @mask: mask to apply to register value
1641 * @value: expected value
1642 * @timeout_ms: timeout in milliseconds
1643 *
1644 * This routine waits until the target register @reg contains the expected
3d466cd6
DV
1645 * @value after applying the @mask, i.e. it waits until ::
1646 *
1647 * (I915_READ(reg) & mask) == value
1648 *
1758b90e
CW
1649 * Otherwise, the wait will time out after @timeout_ms milliseconds.
1650 *
1651 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1652 */
1653int intel_wait_for_register(struct drm_i915_private *dev_priv,
1654 i915_reg_t reg,
1655 const u32 mask,
1656 const u32 value,
1657 const unsigned long timeout_ms)
7fd2d269 1658{
1758b90e
CW
1659
1660 unsigned fw =
1661 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1662 int ret;
1663
1664 intel_uncore_forcewake_get(dev_priv, fw);
1665 ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1666 intel_uncore_forcewake_put(dev_priv, fw);
1667 if (ret)
1668 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1669 timeout_ms);
1670
1671 return ret;
d431440c
TE
1672}
1673
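/*
 * Illustrative sketch, not part of the original file: for a potentially
 * slow wait, intel_wait_for_register() acquires and releases the required
 * forcewake domains itself, so the caller does not pin them for the whole
 * duration. The helper name and the 200 ms timeout are hypothetical.
 */
static int wait_for_slow_handshake(struct drm_i915_private *dev_priv,
				   i915_reg_t reg, u32 ready_bit)
{
	return intel_wait_for_register(dev_priv, reg, ready_bit, ready_bit, 200);
}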
1674static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1675{
c033666a 1676 struct drm_i915_private *dev_priv = engine->i915;
d431440c 1677 int ret;
d431440c
TE
1678
1679 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1680 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1681
1758b90e
CW
1682 ret = intel_wait_for_register_fw(dev_priv,
1683 RING_RESET_CTL(engine->mmio_base),
1684 RESET_CTL_READY_TO_RESET,
1685 RESET_CTL_READY_TO_RESET,
1686 700);
d431440c
TE
1687 if (ret)
1688 DRM_ERROR("%s: reset request timeout\n", engine->name);
1689
1690 return ret;
1691}
1692
1693static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1694{
c033666a 1695 struct drm_i915_private *dev_priv = engine->i915;
d431440c
TE
1696
1697 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1698 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
7fd2d269
MK
1699}
1700
dc97997a
CW
1701static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1702 unsigned engine_mask)
7fd2d269 1703{
7fd2d269 1704 struct intel_engine_cs *engine;
bafb0fce 1705 unsigned int tmp;
7fd2d269 1706
bafb0fce 1707 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
d431440c 1708 if (gen8_request_engine_reset(engine))
7fd2d269 1709 goto not_ready;
7fd2d269 1710
dc97997a 1711 return gen6_reset_engines(dev_priv, engine_mask);
7fd2d269
MK
1712
1713not_ready:
bafb0fce 1714 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
d431440c 1715 gen8_unrequest_engine_reset(engine);
7fd2d269
MK
1716
1717 return -EIO;
1718}
1719
dc97997a
CW
1720typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1721
1722static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
907b28c5 1723{
b1330fbb
CW
1724 if (!i915.reset)
1725 return NULL;
1726
dc97997a 1727 if (INTEL_INFO(dev_priv)->gen >= 8)
ee4b6faf 1728 return gen8_reset_engines;
dc97997a 1729 else if (INTEL_INFO(dev_priv)->gen >= 6)
ee4b6faf 1730 return gen6_reset_engines;
dc97997a 1731 else if (IS_GEN5(dev_priv))
49e4d842 1732 return ironlake_do_reset;
dc97997a 1733 else if (IS_G4X(dev_priv))
49e4d842 1734 return g4x_do_reset;
73f67aa8 1735 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
49e4d842 1736 return g33_do_reset;
dc97997a 1737 else if (INTEL_INFO(dev_priv)->gen >= 3)
49e4d842 1738 return i915_do_reset;
542c184f 1739 else
49e4d842
CW
1740 return NULL;
1741}
1742
dc97997a 1743int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
49e4d842 1744{
dc97997a 1745 reset_func reset;
99106bc1 1746 int ret;
49e4d842 1747
dc97997a 1748 reset = intel_get_gpu_reset(dev_priv);
49e4d842 1749 if (reset == NULL)
542c184f 1750 return -ENODEV;
49e4d842 1751
99106bc1
MK
1752 /* If the power well sleeps during the reset, the reset
1753 * request may be dropped and never complete (causing -EIO).
1754 */
1755 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
dc97997a 1756 ret = reset(dev_priv, engine_mask);
99106bc1
MK
1757 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1758
1759 return ret;
49e4d842
CW
1760}
1761
dc97997a 1762bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
49e4d842 1763{
dc97997a 1764 return intel_get_gpu_reset(dev_priv) != NULL;
907b28c5
CW
1765}
1766
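/*
 * Illustrative sketch, not part of the original file: how error-handling
 * code might attempt a full-chip reset, backing off cleanly on platforms
 * without reset support. The helper name try_full_gpu_reset() is
 * hypothetical.
 */
static int try_full_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!intel_has_gpu_reset(dev_priv))
		return -ENODEV;

	/* intel_gpu_reset() holds FORCEWAKE_ALL across the reset so the
	 * request is not dropped if a power well tries to sleep.
	 */
	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}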
6b332fa2
AS
1767int intel_guc_reset(struct drm_i915_private *dev_priv)
1768{
1769 int ret;
1770 unsigned long irqflags;
1771
1a3d1898 1772 if (!HAS_GUC(dev_priv))
6b332fa2
AS
1773 return -EINVAL;
1774
1775 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1776 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1777
1778 ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
1779
1780 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1781 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1782
1783 return ret;
1784}
1785
fc97618b 1786bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
907b28c5 1787{
fc97618b 1788 return check_for_unclaimed_mmio(dev_priv);
907b28c5 1789}
75714940 1790
bc3b9346 1791bool
75714940
MK
1792intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1793{
1794 if (unlikely(i915.mmio_debug ||
1795 dev_priv->uncore.unclaimed_mmio_check <= 0))
bc3b9346 1796 return false;
75714940
MK
1797
1798 if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1799 DRM_DEBUG("Unclaimed register detected, "
1800 "enabling oneshot unclaimed register reporting. "
1801 "Please use i915.mmio_debug=N for more information.\n");
1802 i915.mmio_debug++;
1803 dev_priv->uncore.unclaimed_mmio_check--;
bc3b9346 1804 return true;
75714940 1805 }
bc3b9346
MK
1806
1807 return false;
75714940 1808}
3756685a
TU
1809
1810static enum forcewake_domains
1811intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1812 i915_reg_t reg)
1813{
895833bd 1814 u32 offset = i915_mmio_reg_offset(reg);
3756685a
TU
1815 enum forcewake_domains fw_domains;
1816
895833bd
TU
1817 if (HAS_FWTABLE(dev_priv)) {
1818 fw_domains = __fwtable_reg_read_fw_domains(offset);
1819 } else if (INTEL_GEN(dev_priv) >= 6) {
1820 fw_domains = __gen6_reg_read_fw_domains(offset);
1821 } else {
1822 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1823 fw_domains = 0;
3756685a
TU
1824 }
1825
1826 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1827
1828 return fw_domains;
1829}
1830
1831static enum forcewake_domains
1832intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1833 i915_reg_t reg)
1834{
22d48c55 1835 u32 offset = i915_mmio_reg_offset(reg);
3756685a
TU
1836 enum forcewake_domains fw_domains;
1837
22d48c55
TU
1838 if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
1839 fw_domains = __fwtable_reg_write_fw_domains(offset);
1840 } else if (IS_GEN8(dev_priv)) {
1841 fw_domains = __gen8_reg_write_fw_domains(offset);
1842 } else if (IS_GEN(dev_priv, 6, 7)) {
3756685a 1843 fw_domains = FORCEWAKE_RENDER;
22d48c55
TU
1844 } else {
1845 WARN_ON(!IS_GEN(dev_priv, 2, 5));
1846 fw_domains = 0;
3756685a
TU
1847 }
1848
1849 WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
1850
1851 return fw_domains;
1852}
1853
1854/**
1855 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
1856 * a register
1857 * @dev_priv: pointer to struct drm_i915_private
1858 * @reg: register in question
1859 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
1860 *
1861 * Returns the set of forcewake domains that must be taken (with, for example,
1862 * intel_uncore_forcewake_get()) for the specified register to be accessible in
1863 * the specified mode (read, write or read/write) with raw mmio accessors.
1864 *
1865 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
1866 * callers to do FIFO management on their own or risk losing writes.
1867 */
1868enum forcewake_domains
1869intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
1870 i915_reg_t reg, unsigned int op)
1871{
1872 enum forcewake_domains fw_domains = 0;
1873
1874 WARN_ON(!op);
1875
895833bd
TU
1876 if (intel_vgpu_active(dev_priv))
1877 return 0;
1878
3756685a
TU
1879 if (op & FW_REG_READ)
1880 fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
1881
1882 if (op & FW_REG_WRITE)
1883 fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
1884
1885 return fw_domains;
1886}
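/*
 * Illustrative sketch, not part of the original file: take exactly the
 * forcewake domains a register needs before using the raw (_FW) accessors,
 * mirroring what intel_wait_for_register() does earlier in this file. The
 * helper name read_reg_with_required_fw() is hypothetical.
 */
static u32 read_reg_with_required_fw(struct drm_i915_private *dev_priv,
				     i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	intel_uncore_forcewake_get(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put(dev_priv, fw);

	return val;
}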
26e7a2a1
CW
1887
1888#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1889#include "selftests/intel_uncore.c"
1890#endif