drm/i915: rename raw reg access functions
drivers/gpu/drm/i915/intel_uncore.c [linux-2.6-block.git]
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

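/*
 * Arming the domain timer defers the actual forcewake release by roughly
 * a millisecond, so back-to-back register accesses reuse the existing
 * wake reference instead of bouncing the powerwell.
 */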
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore_to_i915(uncore));

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

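/*
 * Worked example for the gen9 eDRAM size formula below (hypothetical cap
 * value): a capability word decoding to 2 banks, ways index 1 (8 ways)
 * and sets index 2 (2 sets) yields 2 * 8 * 2 * 1MiB = 32MiB.
 */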
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for the size calculation
	 * are not there with pre gen9, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_uncore_read32(&dev_priv->uncore,
							  HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we don't have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (IS_GEN_RANGE(dev_priv, 6, 7))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
					  unsigned int restore_forcewake)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(i915))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (IS_GEN_RANGE(i915, 6, 7))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	__intel_uncore_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. Subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore_to_i915(uncore));

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
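
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * must keep the render powerwell up across several raw accesses would
 * bracket the sequence as
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... raw register sequence ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */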

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		uncore->user_forcewake.saved_mmio_check =
			uncore->unclaimed_mmio_check;
		uncore->user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		uncore->unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(i915))
			dev_info(i915->drm.dev,
				 "Invalid mmio detected during user access\n");

		uncore->unclaimed_mmio_check =
			uncore->user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			uncore->user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore_to_i915(uncore));

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
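
/*
 * Gen11 moved the media engines behind offsets 0x1c0000-0x1dbfff (see the
 * __gen11_fw_ranges table below), hence the second range in the gen11
 * variant above.
 */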

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
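
/*
 * Example (illustrative): on VLV an access at offset 0x12080 falls in the
 * 0x12000-0x13fff entry above and therefore takes FORCEWAKE_MEDIA.
 */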

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

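/* Generates the is_gen8_shadowed()/is_gen11_shadowed() lookup helpers. */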
#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	struct intel_uncore *uncore = &dev_priv->uncore; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

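/* Instantiate the gen2/gen5 MMIO read accessors for each access width. */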
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	struct intel_uncore *uncore = &dev_priv->uncore; \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)

__gen11_fwtable_read(8)
__gen11_fwtable_read(16)
__gen11_fwtable_read(32)
__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	struct intel_uncore *uncore = &dev_priv->uncore; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	struct intel_uncore *uncore = &dev_priv->uncore; \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)

__gen11_fwtable_write(8)
__gen11_fwtable_write(16)
__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
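
/*
 * Example (illustrative): ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable) wires
 * uncore->funcs.mmio_read{b,w,l,q} up to fwtable_read{8,16,32,64}.
 */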
1327
05a2fb15 1328
f7de5027 1329static void fw_domain_init(struct intel_uncore *uncore,
48c1026a 1330 enum forcewake_domain_id domain_id,
f0f59a00
VS
1331 i915_reg_t reg_set,
1332 i915_reg_t reg_ack)
05a2fb15
MK
1333{
1334 struct intel_uncore_forcewake_domain *d;
1335
1336 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1337 return;
1338
535d8d27 1339 d = &uncore->fw_domain[domain_id];
05a2fb15
MK
1340
1341 WARN_ON(d->wake_count);
1342
6e3955a5
CW
1343 WARN_ON(!i915_mmio_reg_valid(reg_set));
1344 WARN_ON(!i915_mmio_reg_valid(reg_ack));
1345
05a2fb15 1346 d->wake_count = 0;
25286aac
DCS
1347 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1348 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
05a2fb15 1349
05a2fb15
MK
1350 d->id = domain_id;
1351
33c582c1
TU
1352 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1353 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1354 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
a89a70a8
DCS
1355 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1356 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1357 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1358 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1359 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1360 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1361
33c582c1 1362
d2dc94bc 1363 d->mask = BIT(domain_id);
33c582c1 1364
a57a4a67
TU
1365 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1366 d->timer.function = intel_uncore_fw_release_timer;
05a2fb15 1367
535d8d27 1368 uncore->fw_domains |= BIT(domain_id);
f9b3927a 1369
159367bb 1370 fw_domain_reset(d);
05a2fb15
MK
1371}
1372
f7de5027 1373static void fw_domain_fini(struct intel_uncore *uncore,
26376a7e
OM
1374 enum forcewake_domain_id domain_id)
1375{
1376 struct intel_uncore_forcewake_domain *d;
1377
1378 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1379 return;
1380
f7de5027 1381 d = &uncore->fw_domain[domain_id];
26376a7e
OM
1382
1383 WARN_ON(d->wake_count);
1384 WARN_ON(hrtimer_cancel(&d->timer));
1385 memset(d, 0, sizeof(*d));
1386
f7de5027 1387 uncore->fw_domains &= ~BIT(domain_id);
26376a7e
OM
1388}
1389
f7de5027 1390static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
0b274481 1391{
f7de5027
DCS
1392 struct drm_i915_private *i915 = uncore_to_i915(uncore);
1393
1394 if (INTEL_GEN(i915) <= 5 || intel_vgpu_active(i915))
3225b2f9
MK
1395 return;
1396
f7de5027 1397 if (INTEL_GEN(i915) >= 11) {
a89a70a8
DCS
1398 int i;
1399
f7de5027 1400 uncore->funcs.force_wake_get =
cc38cae7 1401 fw_domains_get_with_fallback;
f7de5027
DCS
1402 uncore->funcs.force_wake_put = fw_domains_put;
1403 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
a89a70a8
DCS
1404 FORCEWAKE_RENDER_GEN9,
1405 FORCEWAKE_ACK_RENDER_GEN9);
f7de5027 1406 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
a89a70a8
DCS
1407 FORCEWAKE_BLITTER_GEN9,
1408 FORCEWAKE_ACK_BLITTER_GEN9);
1409 for (i = 0; i < I915_MAX_VCS; i++) {
f7de5027 1410 if (!HAS_ENGINE(i915, _VCS(i)))
a89a70a8
DCS
1411 continue;
1412
f7de5027 1413 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
a89a70a8
DCS
1414 FORCEWAKE_MEDIA_VDBOX_GEN11(i),
1415 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
1416 }
1417 for (i = 0; i < I915_MAX_VECS; i++) {
f7de5027 1418 if (!HAS_ENGINE(i915, _VECS(i)))
a89a70a8
DCS
1419 continue;
1420
f7de5027 1421 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
a89a70a8
DCS
1422 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1423 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
1424 }
f7de5027
DCS
1425 } else if (IS_GEN_RANGE(i915, 9, 10)) {
1426 uncore->funcs.force_wake_get =
71306303 1427 fw_domains_get_with_fallback;
f7de5027
DCS
1428 uncore->funcs.force_wake_put = fw_domains_put;
1429 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15
MK
1430 FORCEWAKE_RENDER_GEN9,
1431 FORCEWAKE_ACK_RENDER_GEN9);
f7de5027 1432 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
05a2fb15
MK
1433 FORCEWAKE_BLITTER_GEN9,
1434 FORCEWAKE_ACK_BLITTER_GEN9);
f7de5027 1435 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
05a2fb15 1436 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
f7de5027
DCS
1437 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1438 uncore->funcs.force_wake_get = fw_domains_get;
1439 uncore->funcs.force_wake_put = fw_domains_put;
1440 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 1441 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
f7de5027 1442 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
05a2fb15 1443 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
f7de5027
DCS
1444 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1445 uncore->funcs.force_wake_get =
05a2fb15 1446 fw_domains_get_with_thread_status;
f7de5027
DCS
1447 uncore->funcs.force_wake_put = fw_domains_put;
1448 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 1449 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
f7de5027 1450 } else if (IS_IVYBRIDGE(i915)) {
0b274481
BW
1451 u32 ecobus;
1452
1453 /* IVB configs may use multi-threaded forcewake */
1454
1455 /* A small trick here - if the bios hasn't configured
1456 * MT forcewake, and if the device is in RC6, then
1457 * force_wake_mt_get will not wake the device and the
1458 * ECOBUS read will return zero. Which will be
1459 * (correctly) interpreted by the test below as MT
1460 * forcewake being disabled.
1461 */
f7de5027 1462 uncore->funcs.force_wake_get =
05a2fb15 1463 fw_domains_get_with_thread_status;
f7de5027 1464 uncore->funcs.force_wake_put = fw_domains_put;
05a2fb15 1465
f9b3927a
MK
1466 /* We need to init first for ECOBUS access and then
1467 * determine later if we want to reinit, in case of MT access is
6ea2556f
MK
1468 * not working. In this stage we don't know which flavour this
1469 * ivb is, so it is better to reset also the gen6 fw registers
1470 * before the ecobus check.
f9b3927a 1471 */
6ea2556f 1472
6cc5ca76 1473 __raw_uncore_write32(uncore, FORCEWAKE, 0);
6ebc9692 1474 __raw_posting_read(uncore, ECOBUS);
6ea2556f 1475
f7de5027 1476 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 1477 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
f9b3927a 1478
f7de5027
DCS
1479 spin_lock_irq(&uncore->lock);
1480 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
6cc5ca76 1481 ecobus = __raw_uncore_read32(uncore, ECOBUS);
f7de5027
DCS
1482 fw_domains_put(uncore, FORCEWAKE_RENDER);
1483 spin_unlock_irq(&uncore->lock);
0b274481 1484
05a2fb15 1485 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
0b274481
BW
1486 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1487 DRM_INFO("when using vblank-synced partial screen updates.\n");
f7de5027 1488 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 1489 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1490 }
f7de5027
DCS
1491 } else if (IS_GEN(i915, 6)) {
1492 uncore->funcs.force_wake_get =
05a2fb15 1493 fw_domains_get_with_thread_status;
f7de5027
DCS
1494 uncore->funcs.force_wake_put = fw_domains_put;
1495 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 1496 FORCEWAKE, FORCEWAKE_ACK);
0b274481 1497 }
3225b2f9
MK
1498
1499 /* All future platforms are expected to require complex power gating */
f7de5027 1500 WARN_ON(uncore->fw_domains == 0);
f9b3927a
MK
1501}
1502
f7de5027 1503#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
15157970 1504{ \
f7de5027 1505 (uncore)->fw_domains_table = \
15157970 1506 (struct intel_forcewake_range *)(d); \
f7de5027 1507 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
15157970
TU
1508}
1509
264ec1a8
HG
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
		struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * Forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus; that bus will be busy after this notification, leading
		 * to "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding an RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
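
/*
 * This notifier is registered with the iosf_mbi layer at the end of
 * intel_uncore_init() below, and unregistered (under the punit lock) in
 * intel_uncore_fini().
 */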

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

int intel_uncore_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	i915_check_vgpu(i915);

	intel_uncore_edram_detect(i915);
	intel_uncore_fw_domains_init(uncore);
	__intel_uncore_early_sanitize(uncore, 0);

	uncore->unclaimed_mmio_check = 1;
	uncore->pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN_RANGE(i915, 2, 4) || intel_vgpu_active(i915)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
	} else if (IS_GEN(i915, 5)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (INTEL_GEN(i915) >= 11) {
		enum forcewake_domains fw_domains = uncore->fw_domains;
		enum forcewake_domain_id domain_id;
		int i;

		for (i = 0; i < I915_MAX_VCS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

			if (HAS_ENGINE(i915, _VCS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}

		for (i = 0; i < I915_MAX_VECS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

			if (HAS_ENGINE(i915, _VECS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}
	}
}

void intel_uncore_fini(struct intel_uncore *uncore)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(uncore_to_i915(uncore));

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = I915_READ64_2x32(entry->offset_ldw,
						    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = I915_READ64(entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = I915_READ(entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = I915_READ16(entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = I915_READ8(entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

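/*
 * Illustrative userspace sketch (not built here, hence #if 0): reading the
 * render ring timestamp through the whitelisted ioctl above. The offset is
 * RING_TIMESTAMP(RENDER_RING_BASE), i.e. 0x2358; I915_REG_READ_8B_WA requests
 * the 2x32 read workaround. "fd" is assumed to be an open i915 DRM fd and
 * error handling is elided.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u64 example_read_render_timestamp(int fd)
{
	struct drm_i915_reg_read rr = {
		.offset = 0x2358 | I915_REG_READ_8B_WA,
	};

	ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
	return rr.val;
}
#endif
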
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
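
/*
 * Illustrative sketch only (not used by the driver): a caller that already
 * holds forcewake doing a tight, atomic-safe poll for render engine idleness.
 * The register/mask pair (RING_MI_MODE/MODE_IDLE) is real; the helper itself
 * is hypothetical.
 */
static int __maybe_unused
example_poll_render_idle_fw(struct drm_i915_private *dev_priv)
{
	/* Spin for at most 500us; slow_timeout_ms == 0 keeps this atomic. */
	return __intel_wait_for_register_fw(dev_priv,
					    RING_MI_MODE(RENDER_RING_BASE),
					    MODE_IDLE, MODE_IDLE,
					    500, 0, NULL);
}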

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct drm_i915_private *dev_priv,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	unsigned int fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	bool ret = false;

	spin_lock_irq(&uncore->lock);

	if (unlikely(uncore->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->lock);

	return ret;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
	} else {
		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN(dev_priv, 8)) {
		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains that must be taken, for example with
 * intel_uncore_forcewake_get(), for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * the callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
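
/*
 * Illustrative sketch only: the pattern the kernel-doc above describes. Take
 * exactly the domains the register needs, then the raw (_FW) accessors are
 * safe. This mirrors what __intel_wait_for_register() does internally; the
 * helper itself is hypothetical.
 */
static u32 __maybe_unused
example_read_with_required_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}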

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif