drm/i915: rework some irq functions to take intel_gt as argument
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_uncore.c
CommitLineData
907b28c5
CW
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
696173b0
JN
24#include <linux/pm_runtime.h>
25#include <asm/iosf_mbi.h>
26
bfac1e2b
MR
27#include "gt/intel_lrc_reg.h" /* for shadow reg list */
28
907b28c5 29#include "i915_drv.h"
a09d9a80 30#include "i915_trace.h"
cf9d2890 31#include "i915_vgpu.h"
696173b0 32#include "intel_pm.h"
6daccb0b 33
/* Max time (ms) to wait for a forcewake ack before declaring a timeout. */
#define FORCEWAKE_ACK_TIMEOUT_MS 50
/* Max time (ms) to wait for free GT FIFO entries before giving up. */
#define GT_FIFO_TIMEOUT_MS 10

/* Read-back after a raw write to force posting; result intentionally discarded. */
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
5716c8c6
DA
39static void
40fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
41{
42 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
43}
44
0a9b2630
DCS
45void
46intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
47{
48 spin_lock_init(&mmio_debug->lock);
49 mmio_debug->unclaimed_mmio_check = 1;
50}
51
52static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
53{
54 lockdep_assert_held(&mmio_debug->lock);
55
56 /* Save and disable mmio debugging for the user bypass */
57 if (!mmio_debug->suspend_count++) {
58 mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
59 mmio_debug->unclaimed_mmio_check = 0;
60 }
61}
62
63static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
64{
65 lockdep_assert_held(&mmio_debug->lock);
66
67 if (!--mmio_debug->suspend_count)
68 mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
69}
70
/*
 * Human-readable names for each forcewake domain, indexed by
 * enum forcewake_domain_id; order must match the FW_DOMAIN_ID_* enum
 * (the BUILD_BUG_ON in intel_uncore_forcewake_domain_to_str() enforces
 * the count).
 */
static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
};
88
89const char *
48c1026a 90intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 91{
53abb679 92 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
05a2fb15
MK
93
94 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
95 return forcewake_domain_names[id];
96
97 WARN_ON(id);
98
99 return "unknown";
100}
101
/*
 * Raw accessors for a single forcewake domain's registers: fw_ack() reads
 * the ack register; fw_set()/fw_clear() write the request register using
 * the hardware's masked-bit protocol (high 16 bits select which low bits
 * are affected).
 */
#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
/*
 * Drop every request bit of a forcewake domain without waiting for acks.
 *
 * We don't really know if the powerwell for the forcewake domain we are
 * trying to reset here does exist at this point (engines could be fused
 * off in ICL+), so no waiting for acks.
 */
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}
117
05a2fb15
MK
118static inline void
119fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
907b28c5 120{
77adbd8f
CW
121 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
122 d->uncore->fw_domains_timer |= d->mask;
a57a4a67
TU
123 d->wake_count++;
124 hrtimer_start_range_ns(&d->timer,
8b0e1953 125 NSEC_PER_MSEC,
a57a4a67
TU
126 NSEC_PER_MSEC,
127 HRTIMER_MODE_REL);
907b28c5
CW
128}
129
71306303 130static inline int
535d8d27 131__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
71306303
MK
132 const u32 ack,
133 const u32 value)
134{
535d8d27 135 return wait_for_atomic((fw_ack(d) & ack) == value,
71306303
MK
136 FORCEWAKE_ACK_TIMEOUT_MS);
137}
138
139static inline int
535d8d27 140wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
71306303
MK
141 const u32 ack)
142{
535d8d27 143 return __wait_for_ack(d, ack, 0);
71306303
MK
144}
145
146static inline int
535d8d27 147wait_ack_set(const struct intel_uncore_forcewake_domain *d,
71306303
MK
148 const u32 ack)
149{
535d8d27 150 return __wait_for_ack(d, ack, ack);
71306303
MK
151}
152
05a2fb15 153static inline void
535d8d27 154fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
907b28c5 155{
18ecc6c5 156 if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
05a2fb15
MK
157 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
158 intel_uncore_forcewake_domain_to_str(d->id));
65706203 159 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
18ecc6c5 160 }
05a2fb15 161}
907b28c5 162
/* Which ack condition fw_domain_wait_ack_with_fallback() is waiting for. */
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};
167
/*
 * Retry path for a missing forcewake ack: toggle the fallback request
 * bit (with escalating delays) until the original ack shows up or we
 * run out of passes. Returns 0 when the expected ack state was reached,
 * -ETIMEDOUT otherwise.
 */
static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		/* Did the original ack arrive alongside the fallback ack? */
		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
213
214static inline void
535d8d27 215fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
71306303 216{
535d8d27 217 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
71306303
MK
218 return;
219
535d8d27
DCS
220 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
221 fw_domain_wait_ack_clear(d);
71306303
MK
222}
223
05a2fb15 224static inline void
159367bb 225fw_domain_get(const struct intel_uncore_forcewake_domain *d)
05a2fb15 226{
159367bb 227 fw_set(d, FORCEWAKE_KERNEL);
05a2fb15 228}
907b28c5 229
05a2fb15 230static inline void
535d8d27 231fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
05a2fb15 232{
18ecc6c5 233 if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
05a2fb15
MK
234 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
235 intel_uncore_forcewake_domain_to_str(d->id));
65706203 236 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
18ecc6c5 237 }
05a2fb15 238}
907b28c5 239
71306303 240static inline void
535d8d27 241fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
71306303 242{
535d8d27 243 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
71306303
MK
244 return;
245
535d8d27
DCS
246 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
247 fw_domain_wait_ack_set(d);
71306303
MK
248}
249
05a2fb15 250static inline void
159367bb 251fw_domain_put(const struct intel_uncore_forcewake_domain *d)
05a2fb15 252{
159367bb 253 fw_clear(d, FORCEWAKE_KERNEL);
907b28c5
CW
254}
255
05a2fb15 256static void
5716c8c6 257fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
907b28c5 258{
05a2fb15 259 struct intel_uncore_forcewake_domain *d;
d2dc94bc 260 unsigned int tmp;
907b28c5 261
535d8d27 262 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
d2dc94bc 263
f568eeee 264 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
535d8d27 265 fw_domain_wait_ack_clear(d);
159367bb 266 fw_domain_get(d);
05a2fb15 267 }
4e1176dd 268
f568eeee 269 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
535d8d27 270 fw_domain_wait_ack_set(d);
71306303 271
535d8d27 272 uncore->fw_domains_active |= fw_domains;
71306303
MK
273}
274
275static void
f568eeee 276fw_domains_get_with_fallback(struct intel_uncore *uncore,
71306303
MK
277 enum forcewake_domains fw_domains)
278{
279 struct intel_uncore_forcewake_domain *d;
280 unsigned int tmp;
281
535d8d27 282 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
71306303 283
f568eeee 284 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
535d8d27 285 fw_domain_wait_ack_clear_fallback(d);
159367bb 286 fw_domain_get(d);
71306303
MK
287 }
288
f568eeee 289 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
535d8d27 290 fw_domain_wait_ack_set_fallback(d);
b8473050 291
535d8d27 292 uncore->fw_domains_active |= fw_domains;
05a2fb15 293}
907b28c5 294
05a2fb15 295static void
f568eeee 296fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
05a2fb15
MK
297{
298 struct intel_uncore_forcewake_domain *d;
d2dc94bc
CW
299 unsigned int tmp;
300
535d8d27 301 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
907b28c5 302
f568eeee 303 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
159367bb 304 fw_domain_put(d);
b8473050 305
535d8d27 306 uncore->fw_domains_active &= ~fw_domains;
05a2fb15 307}
907b28c5 308
05a2fb15 309static void
f568eeee 310fw_domains_reset(struct intel_uncore *uncore,
577ac4bd 311 enum forcewake_domains fw_domains)
05a2fb15
MK
312{
313 struct intel_uncore_forcewake_domain *d;
d2dc94bc 314 unsigned int tmp;
05a2fb15 315
d2dc94bc 316 if (!fw_domains)
3225b2f9 317 return;
f9b3927a 318
535d8d27 319 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
d2dc94bc 320
f568eeee 321 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
159367bb 322 fw_domain_reset(d);
05a2fb15
MK
323}
324
6ebc9692 325static inline u32 gt_thread_status(struct intel_uncore *uncore)
a5b22b5e
CW
326{
327 u32 val;
328
6cc5ca76 329 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
a5b22b5e
CW
330 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
331
332 return val;
333}
334
6ebc9692 335static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
05a2fb15 336{
a5b22b5e
CW
337 /*
338 * w/a for a sporadic read returning 0 by waiting for the GT
05a2fb15
MK
339 * thread to wake up.
340 */
a9f236d1
PB
341 drm_WARN_ONCE(&uncore->i915->drm,
342 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
343 "GT thread status wait timed out\n");
05a2fb15
MK
344}
345
f568eeee 346static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
48c1026a 347 enum forcewake_domains fw_domains)
05a2fb15 348{
5def925d 349 fw_domains_get_normal(uncore, fw_domains);
907b28c5 350
05a2fb15 351 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
6ebc9692 352 __gen6_gt_wait_for_thread_c0(uncore);
907b28c5
CW
353}
354
6ebc9692 355static inline u32 fifo_free_entries(struct intel_uncore *uncore)
c32e3788 356{
6cc5ca76 357 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
c32e3788
DG
358
359 return count & GT_FIFO_FREE_ENTRIES_MASK;
360}
361
/*
 * Reserve a GT FIFO entry before a write: if the (cached or live) free
 * count is at/below the reserved threshold, poll until entries free up,
 * then account for the slot we are about to consume. On timeout the
 * cached count is deliberately left untouched.
 */
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		/* NB: the wait condition re-reads the count into n as a side effect */
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	/* Consume one entry for the caller's upcoming write */
	uncore->fifo_count = n - 1;
}
385
/*
 * hrtimer callback that drops a domain's auto-release forcewake
 * reference ~1ms after it was armed by fw_domain_arm_timer().
 *
 * If the domain was marked active again since arming (xchg returns the
 * old value and clears it atomically), the release is deferred by
 * restarting the timer instead of dropping the reference.
 */
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	/* Domain saw use since the timer was armed: keep it awake a while */
	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	/* No longer a pending automatic release for this domain */
	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
411
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
/*
 * Quiesce all forcewake state: cancel/flush every domain's auto-release
 * timer, drop any remaining references, and clear all request bits.
 * Returns the mask of domains that were still held (the "lost" user
 * forcewake), so a resume path can re-acquire them.
 */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			/* Prevent the timer from re-arming itself */
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			/* Timer already fired/cancelled: run the release inline */
			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* Drop the lock so pending timers can run, then retry */
		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	/* Exits the loop with uncore->lock held */
	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}
472
8a47eb19 473static bool
6ebc9692 474fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
8a47eb19
MK
475{
476 u32 dbg;
477
6cc5ca76 478 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
8a47eb19
MK
479 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
480 return false;
481
29b6f88d
MR
482 /*
483 * Bugs in PCI programming (or failing hardware) can occasionally cause
484 * us to lose access to the MMIO BAR. When this happens, register
485 * reads will come back with 0xFFFFFFFF for every register and things
486 * go bad very quickly. Let's try to detect that special case and at
487 * least try to print a more informative message about what has
488 * happened.
489 *
490 * During normal operation the FPGA_DBG register has several unused
491 * bits that will always read back as 0's so we can use them as canaries
492 * to recognize when MMIO accesses are just busted.
493 */
494 if (unlikely(dbg == ~0))
495 drm_err(&uncore->i915->drm,
496 "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
497
6cc5ca76 498 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
8a47eb19
MK
499
500 return true;
501}
502
8ac3e1bb 503static bool
6ebc9692 504vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
8ac3e1bb
MK
505{
506 u32 cer;
507
6cc5ca76 508 cer = __raw_uncore_read32(uncore, CLAIM_ER);
8ac3e1bb
MK
509 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
510 return false;
511
6cc5ca76 512 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
8ac3e1bb
MK
513
514 return true;
515}
516
a338908c 517static bool
6ebc9692 518gen6_check_for_fifo_debug(struct intel_uncore *uncore)
a338908c
MK
519{
520 u32 fifodbg;
521
6cc5ca76 522 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
a338908c
MK
523
524 if (unlikely(fifodbg)) {
d0208cfa 525 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
6cc5ca76 526 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
a338908c
MK
527 }
528
529 return fifodbg;
530}
531
8ac3e1bb 532static bool
2cf7bf6f 533check_for_unclaimed_mmio(struct intel_uncore *uncore)
8ac3e1bb 534{
a338908c
MK
535 bool ret = false;
536
0a9b2630
DCS
537 lockdep_assert_held(&uncore->debug->lock);
538
539 if (uncore->debug->suspend_count)
540 return false;
541
2cf7bf6f 542 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
6ebc9692 543 ret |= fpga_check_for_unclaimed_mmio(uncore);
8ac3e1bb 544
2cf7bf6f 545 if (intel_uncore_has_dbg_unclaimed(uncore))
6ebc9692 546 ret |= vlv_check_for_unclaimed_mmio(uncore);
a338908c 547
2cf7bf6f 548 if (intel_uncore_has_fifo(uncore))
6ebc9692 549 ret |= gen6_check_for_fifo_debug(uncore);
8ac3e1bb 550
a338908c 551 return ret;
8ac3e1bb
MK
552}
553
/*
 * Bring forcewake hardware to a known state early in init/resume:
 * apply the CHV shadow-reg workaround, reset all forcewake state, and
 * optionally re-acquire the domains that were held before a suspend.
 * The PUNIT->PMIC bus must be held around the reset, hence the
 * iosf_mbi acquire/release bracket.
 */
static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
579
/*
 * System-suspend hook: unregister the PMIC bus-access notifier and
 * reset forcewake, remembering which domains were held so
 * intel_uncore_resume_early() can restore them. The PUNIT bus must be
 * held across both operations.
 */
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}
591
f7de5027 592void intel_uncore_resume_early(struct intel_uncore *uncore)
68f60946 593{
d60996ab
CW
594 unsigned int restore_forcewake;
595
2e81bc61 596 if (intel_uncore_unclaimed_mmio(uncore))
d0208cfa 597 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
2e81bc61
DCS
598
599 if (!intel_uncore_has_forcewake(uncore))
600 return;
601
f7de5027 602 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
2e81bc61 603 forcewake_early_sanitize(uncore, restore_forcewake);
d60996ab 604
f7de5027 605 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
ed493883
ID
606}
607
f7de5027 608void intel_uncore_runtime_resume(struct intel_uncore *uncore)
bedf4d79 609{
2e81bc61
DCS
610 if (!intel_uncore_has_forcewake(uncore))
611 return;
612
f7de5027 613 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
bedf4d79
HG
614}
615
f568eeee 616static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
a6111f7b
CW
617 enum forcewake_domains fw_domains)
618{
619 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 620 unsigned int tmp;
a6111f7b 621
f568eeee 622 fw_domains &= uncore->fw_domains;
a6111f7b 623
f568eeee 624 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
c9e0c6da 625 if (domain->wake_count++) {
33c582c1 626 fw_domains &= ~domain->mask;
c9e0c6da
CW
627 domain->active = true;
628 }
629 }
a6111f7b 630
b8473050 631 if (fw_domains)
5716c8c6 632 fw_domains_get(uncore, fw_domains);
a6111f7b
CW
633}
634
59bad947
MK
635/**
636 * intel_uncore_forcewake_get - grab forcewake domain references
3ceea6a1 637 * @uncore: the intel_uncore structure
59bad947
MK
638 * @fw_domains: forcewake domains to get reference on
639 *
640 * This function can be used get GT's forcewake domain references.
641 * Normal register access will handle the forcewake domains automatically.
642 * However if some sequence requires the GT to not power down a particular
643 * forcewake domains this function should be called at the beginning of the
644 * sequence. And subsequently the reference should be dropped by symmetric
645 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
646 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
907b28c5 647 */
3ceea6a1 648void intel_uncore_forcewake_get(struct intel_uncore *uncore,
48c1026a 649 enum forcewake_domains fw_domains)
907b28c5
CW
650{
651 unsigned long irqflags;
652
5716c8c6 653 if (!uncore->fw_get_funcs)
ab484f8f
BW
654 return;
655
87b391b9 656 assert_rpm_wakelock_held(uncore->rpm);
c8c8fb33 657
f568eeee
DCS
658 spin_lock_irqsave(&uncore->lock, irqflags);
659 __intel_uncore_forcewake_get(uncore, fw_domains);
660 spin_unlock_irqrestore(&uncore->lock, irqflags);
907b28c5
CW
661}
662
d7a133d8
CW
663/**
664 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
3ceea6a1 665 * @uncore: the intel_uncore structure
d7a133d8
CW
666 *
667 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
668 * the GT powerwell and in the process disable our debugging for the
669 * duration of userspace's bypass.
670 */
3ceea6a1 671void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
d7a133d8 672{
f568eeee 673 spin_lock_irq(&uncore->lock);
0a9b2630 674 if (!uncore->user_forcewake_count++) {
3ceea6a1 675 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
0a9b2630
DCS
676 spin_lock(&uncore->debug->lock);
677 mmio_debug_suspend(uncore->debug);
678 spin_unlock(&uncore->debug->lock);
d7a133d8 679 }
f568eeee 680 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
681}
682
683/**
684 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
3ceea6a1 685 * @uncore: the intel_uncore structure
d7a133d8
CW
686 *
687 * This function complements intel_uncore_forcewake_user_get() and releases
688 * the GT powerwell taken on behalf of the userspace bypass.
689 */
3ceea6a1 690void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
d7a133d8 691{
f568eeee 692 spin_lock_irq(&uncore->lock);
0a9b2630
DCS
693 if (!--uncore->user_forcewake_count) {
694 spin_lock(&uncore->debug->lock);
695 mmio_debug_resume(uncore->debug);
696
697 if (check_for_unclaimed_mmio(uncore))
19b5b50f 698 drm_info(&uncore->i915->drm,
d7a133d8 699 "Invalid mmio detected during user access\n");
0a9b2630 700 spin_unlock(&uncore->debug->lock);
d7a133d8 701
3ceea6a1 702 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
d7a133d8 703 }
f568eeee 704 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
705}
706
59bad947 707/**
a6111f7b 708 * intel_uncore_forcewake_get__locked - grab forcewake domain references
3ceea6a1 709 * @uncore: the intel_uncore structure
a6111f7b 710 * @fw_domains: forcewake domains to get reference on
59bad947 711 *
a6111f7b
CW
712 * See intel_uncore_forcewake_get(). This variant places the onus
713 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 714 */
3ceea6a1 715void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
a6111f7b
CW
716 enum forcewake_domains fw_domains)
717{
f568eeee
DCS
718 lockdep_assert_held(&uncore->lock);
719
5716c8c6 720 if (!uncore->fw_get_funcs)
a6111f7b
CW
721 return;
722
f568eeee 723 __intel_uncore_forcewake_get(uncore, fw_domains);
a6111f7b
CW
724}
725
f568eeee 726static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
a6111f7b 727 enum forcewake_domains fw_domains)
907b28c5 728{
b2cff0db 729 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 730 unsigned int tmp;
907b28c5 731
f568eeee 732 fw_domains &= uncore->fw_domains;
b2cff0db 733
f568eeee 734 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
77adbd8f 735 GEM_BUG_ON(!domain->wake_count);
b2cff0db 736
c9e0c6da
CW
737 if (--domain->wake_count) {
738 domain->active = true;
b2cff0db 739 continue;
c9e0c6da 740 }
b2cff0db 741
5716c8c6 742 fw_domains_put(uncore, domain->mask);
aec347ab 743 }
a6111f7b 744}
dc9fb09c 745
a6111f7b
CW
746/**
747 * intel_uncore_forcewake_put - release a forcewake domain reference
3ceea6a1 748 * @uncore: the intel_uncore structure
a6111f7b
CW
749 * @fw_domains: forcewake domains to put references
750 *
751 * This function drops the device-level forcewakes for specified
752 * domains obtained by intel_uncore_forcewake_get().
753 */
3ceea6a1 754void intel_uncore_forcewake_put(struct intel_uncore *uncore,
a6111f7b
CW
755 enum forcewake_domains fw_domains)
756{
757 unsigned long irqflags;
758
5716c8c6 759 if (!uncore->fw_get_funcs)
a6111f7b
CW
760 return;
761
f568eeee
DCS
762 spin_lock_irqsave(&uncore->lock, irqflags);
763 __intel_uncore_forcewake_put(uncore, fw_domains);
764 spin_unlock_irqrestore(&uncore->lock, irqflags);
907b28c5
CW
765}
766
032d992d
CW
767/**
768 * intel_uncore_forcewake_flush - flush the delayed release
769 * @uncore: the intel_uncore structure
770 * @fw_domains: forcewake domains to flush
771 */
772void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
773 enum forcewake_domains fw_domains)
774{
775 struct intel_uncore_forcewake_domain *domain;
776 unsigned int tmp;
777
5716c8c6 778 if (!uncore->fw_get_funcs)
032d992d
CW
779 return;
780
781 fw_domains &= uncore->fw_domains;
782 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
783 WRITE_ONCE(domain->active, false);
784 if (hrtimer_cancel(&domain->timer))
785 intel_uncore_fw_release_timer(&domain->timer);
786 }
787}
788
a6111f7b
CW
789/**
790 * intel_uncore_forcewake_put__locked - grab forcewake domain references
3ceea6a1 791 * @uncore: the intel_uncore structure
a6111f7b
CW
792 * @fw_domains: forcewake domains to get reference on
793 *
794 * See intel_uncore_forcewake_put(). This variant places the onus
795 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
796 */
3ceea6a1 797void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
a6111f7b
CW
798 enum forcewake_domains fw_domains)
799{
f568eeee
DCS
800 lockdep_assert_held(&uncore->lock);
801
5716c8c6 802 if (!uncore->fw_get_funcs)
a6111f7b
CW
803 return;
804
f568eeee 805 __intel_uncore_forcewake_put(uncore, fw_domains);
a6111f7b
CW
806}
807
f568eeee 808void assert_forcewakes_inactive(struct intel_uncore *uncore)
e998c40f 809{
5716c8c6 810 if (!uncore->fw_get_funcs)
e998c40f
PZ
811 return;
812
a9f236d1
PB
813 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
814 "Expected all fw_domains to be inactive, but %08x are still on\n",
815 uncore->fw_domains_active);
67e64564
CW
816}
817
/*
 * Debug assert (CONFIG_DRM_I915_DEBUG_RUNTIME_PM only): verify that the
 * caller holds an explicit reference on each of @fw_domains, over and
 * above any pending auto-release reference from the timer path.
 */
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}
858
/* We give fast paths for the really cool registers */
/*
 * True when an mmio offset may require forcewake handling: everything
 * below 0x40000 plus the gen11+ BSD ring range. Evaluates @reg once.
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})
864
9480dbf0 865static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 866{
91e630b9
TU
867 if (offset < entry->start)
868 return -1;
869 else if (offset > entry->end)
870 return 1;
871 else
872 return 0;
873}
874
/* Copied and "macroized" from lib/bsearch.c */
/*
 * Binary search over a sorted array: yields a pointer to the matching
 * element, or NULL when @key is not found. @cmp follows the usual
 * <0 / 0 / >0 contract.
 */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
893
/*
 * Look up which forcewake domains guard the register at @offset via a
 * binary search of the platform's sorted range table. Returns 0 when
 * the offset needs no forcewake.
 */
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}
921
922#define GEN_FW_RANGE(s, e, d) \
923 { .start = (s), .end = (e), .domains = (d) }
1938e59a 924
b0081239 925/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c
TU
926static const struct intel_forcewake_range __vlv_fw_ranges[] = {
927 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
928 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
929 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
9fc1117c
TU
930 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
931 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
b0081239 932 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
9fc1117c
TU
933 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
934};
1938e59a 935
/*
 * Forcewake domains needed to *read* @offset: consult the range table
 * only for offsets that can need forcewake at all (fast-path filter).
 */
#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})
943
47188574 944/* *Must* be sorted by offset! See intel_shadow_table_check(). */
f9d56cd6
MR
945static const struct i915_range gen8_shadowed_regs[] = {
946 { .start = 0x2030, .end = 0x2030 },
947 { .start = 0xA008, .end = 0xA00C },
948 { .start = 0x12030, .end = 0x12030 },
949 { .start = 0x1a030, .end = 0x1a030 },
950 { .start = 0x22030, .end = 0x22030 },
6863b76c
TU
951 /* TODO: Other registers are not yet used */
952};
953
f9d56cd6
MR
954static const struct i915_range gen11_shadowed_regs[] = {
955 { .start = 0x2030, .end = 0x2030 },
956 { .start = 0x2550, .end = 0x2550 },
957 { .start = 0xA008, .end = 0xA00C },
958 { .start = 0x22030, .end = 0x22030 },
0bb50de1
MR
959 { .start = 0x22230, .end = 0x22230 },
960 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 961 { .start = 0x1C0030, .end = 0x1C0030 },
0bb50de1
MR
962 { .start = 0x1C0230, .end = 0x1C0230 },
963 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 964 { .start = 0x1C4030, .end = 0x1C4030 },
0bb50de1
MR
965 { .start = 0x1C4230, .end = 0x1C4230 },
966 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 967 { .start = 0x1C8030, .end = 0x1C8030 },
0bb50de1
MR
968 { .start = 0x1C8230, .end = 0x1C8230 },
969 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 970 { .start = 0x1D0030, .end = 0x1D0030 },
0bb50de1
MR
971 { .start = 0x1D0230, .end = 0x1D0230 },
972 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 973 { .start = 0x1D4030, .end = 0x1D4030 },
0bb50de1
MR
974 { .start = 0x1D4230, .end = 0x1D4230 },
975 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 976 { .start = 0x1D8030, .end = 0x1D8030 },
0bb50de1
MR
977 { .start = 0x1D8230, .end = 0x1D8230 },
978 { .start = 0x1D8510, .end = 0x1D8550 },
a89a70a8
DCS
979};
980
f9d56cd6
MR
981static const struct i915_range gen12_shadowed_regs[] = {
982 { .start = 0x2030, .end = 0x2030 },
5798a769 983 { .start = 0x2510, .end = 0x2550 },
f9d56cd6 984 { .start = 0xA008, .end = 0xA00C },
5798a769
MR
985 { .start = 0xA188, .end = 0xA188 },
986 { .start = 0xA278, .end = 0xA278 },
987 { .start = 0xA540, .end = 0xA56C },
988 { .start = 0xC4C8, .end = 0xC4C8 },
989 { .start = 0xC4D4, .end = 0xC4D4 },
990 { .start = 0xC600, .end = 0xC600 },
f9d56cd6 991 { .start = 0x22030, .end = 0x22030 },
5798a769 992 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 993 { .start = 0x1C0030, .end = 0x1C0030 },
5798a769 994 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 995 { .start = 0x1C4030, .end = 0x1C4030 },
5798a769 996 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 997 { .start = 0x1C8030, .end = 0x1C8030 },
5798a769 998 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 999 { .start = 0x1D0030, .end = 0x1D0030 },
5798a769 1000 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 1001 { .start = 0x1D4030, .end = 0x1D4030 },
5798a769 1002 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 1003 { .start = 0x1D8030, .end = 0x1D8030 },
5798a769 1004 { .start = 0x1D8510, .end = 0x1D8550 },
bfac1e2b 1005
5c5c40e2
MR
1006 /*
1007 * The rest of these ranges are specific to Xe_HP and beyond, but
1008 * are reserved/unused ranges on earlier gen12 platforms, so they can
1009 * be safely added to the gen12 table.
1010 */
f9d56cd6 1011 { .start = 0x1E0030, .end = 0x1E0030 },
5c5c40e2 1012 { .start = 0x1E0510, .end = 0x1E0550 },
f9d56cd6 1013 { .start = 0x1E4030, .end = 0x1E4030 },
5c5c40e2 1014 { .start = 0x1E4510, .end = 0x1E4550 },
f9d56cd6 1015 { .start = 0x1E8030, .end = 0x1E8030 },
5c5c40e2 1016 { .start = 0x1E8510, .end = 0x1E8550 },
f9d56cd6 1017 { .start = 0x1F0030, .end = 0x1F0030 },
5c5c40e2 1018 { .start = 0x1F0510, .end = 0x1F0550 },
f9d56cd6 1019 { .start = 0x1F4030, .end = 0x1F4030 },
5c5c40e2 1020 { .start = 0x1F4510, .end = 0x1F4550 },
f9d56cd6 1021 { .start = 0x1F8030, .end = 0x1F8030 },
5c5c40e2 1022 { .start = 0x1F8510, .end = 0x1F8550 },
bfac1e2b
MR
1023};
1024
c74e66d4
MR
1025static const struct i915_range dg2_shadowed_regs[] = {
1026 { .start = 0x2030, .end = 0x2030 },
1027 { .start = 0x2510, .end = 0x2550 },
1028 { .start = 0xA008, .end = 0xA00C },
1029 { .start = 0xA188, .end = 0xA188 },
1030 { .start = 0xA278, .end = 0xA278 },
1031 { .start = 0xA540, .end = 0xA56C },
1032 { .start = 0xC4C8, .end = 0xC4C8 },
1033 { .start = 0xC4E0, .end = 0xC4E0 },
1034 { .start = 0xC600, .end = 0xC600 },
1035 { .start = 0xC658, .end = 0xC658 },
1036 { .start = 0x22030, .end = 0x22030 },
1037 { .start = 0x22510, .end = 0x22550 },
1038 { .start = 0x1C0030, .end = 0x1C0030 },
1039 { .start = 0x1C0510, .end = 0x1C0550 },
1040 { .start = 0x1C4030, .end = 0x1C4030 },
1041 { .start = 0x1C4510, .end = 0x1C4550 },
1042 { .start = 0x1C8030, .end = 0x1C8030 },
1043 { .start = 0x1C8510, .end = 0x1C8550 },
1044 { .start = 0x1D0030, .end = 0x1D0030 },
1045 { .start = 0x1D0510, .end = 0x1D0550 },
1046 { .start = 0x1D4030, .end = 0x1D4030 },
1047 { .start = 0x1D4510, .end = 0x1D4550 },
1048 { .start = 0x1D8030, .end = 0x1D8030 },
1049 { .start = 0x1D8510, .end = 0x1D8550 },
1050 { .start = 0x1E0030, .end = 0x1E0030 },
1051 { .start = 0x1E0510, .end = 0x1E0550 },
1052 { .start = 0x1E4030, .end = 0x1E4030 },
1053 { .start = 0x1E4510, .end = 0x1E4550 },
1054 { .start = 0x1E8030, .end = 0x1E8030 },
1055 { .start = 0x1E8510, .end = 0x1E8550 },
1056 { .start = 0x1F0030, .end = 0x1F0030 },
1057 { .start = 0x1F0510, .end = 0x1F0550 },
1058 { .start = 0x1F4030, .end = 0x1F4030 },
1059 { .start = 0x1F4510, .end = 0x1F4550 },
1060 { .start = 0x1F8030, .end = 0x1F8030 },
1061 { .start = 0x1F8510, .end = 0x1F8550 },
cf82d9dd
MT
1062};
1063
f9d56cd6 1064static int mmio_range_cmp(u32 key, const struct i915_range *range)
5a659383 1065{
f9d56cd6 1066 if (key < range->start)
5a659383 1067 return -1;
f9d56cd6 1068 else if (key > range->end)
5a659383
TU
1069 return 1;
1070 else
1071 return 0;
1072}
1073
6cdbb101
MR
1074static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1075{
1076 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1077 return false;
6863b76c 1078
6cdbb101
MR
1079 return BSEARCH(offset,
1080 uncore->shadowed_reg_table,
1081 uncore->shadowed_reg_table_entries,
1082 mmio_range_cmp);
1083}
a89a70a8 1084
ccb2acea
DCS
1085static enum forcewake_domains
1086gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1087{
1088 return FORCEWAKE_RENDER;
1089}
1090
1ab2b4cd
MR
1091static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1092 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1093};
6863b76c 1094
b0081239 1095/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c
TU
1096static const struct intel_forcewake_range __chv_fw_ranges[] = {
1097 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 1098 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1099 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 1100 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1101 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 1102 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1103 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
b0081239
TU
1104 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1105 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 1106 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
b0081239
TU
1107 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1108 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c
TU
1109 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1110 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1111 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1112 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 1113};
38fb6a40 1114
/*
 * Forcewake domains needed to *write* @offset.  Unlike the read variant,
 * registers on the platform's shadow list skip the table lookup entirely.
 */
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
1123
b0081239 1124/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c 1125static const struct intel_forcewake_range __gen9_fw_ranges[] = {
55e3c170 1126 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
9fc1117c
TU
1127 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1128 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1129 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
9fc1117c 1130 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1131 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
9fc1117c 1132 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1133 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
b0081239 1134 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 1135 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1136 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
9fc1117c 1137 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1138 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
b0081239 1139 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
55e3c170 1140 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
9fc1117c 1141 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1142 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
9fc1117c 1143 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
55e3c170 1144 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
b0081239 1145 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1146 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
9fc1117c 1147 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
55e3c170 1148 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
b0081239 1149 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1150 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
9fc1117c 1151 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
55e3c170 1152 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
9fc1117c 1153 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
55e3c170 1154 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
b0081239 1155 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
55e3c170 1156 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
9fc1117c
TU
1157 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1158};
6863b76c 1159
a89a70a8
DCS
1160/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1161static const struct intel_forcewake_range __gen11_fw_ranges[] = {
c4310def 1162 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
a89a70a8 1163 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1164 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
a89a70a8 1165 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1166 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
a89a70a8 1167 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1168 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
a89a70a8 1169 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1170 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
a89a70a8 1171 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1172 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
c4310def 1173 GEN_FW_RANGE(0x8800, 0x8bff, 0),
a89a70a8 1174 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1175 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
c4310def
RS
1176 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1177 GEN_FW_RANGE(0x9560, 0x95ff, 0),
55e3c170 1178 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
a89a70a8 1179 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1180 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
c9f8d187 1181 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1182 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
c9f8d187 1183 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
55e3c170 1184 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
c4310def 1185 GEN_FW_RANGE(0x24000, 0x2407f, 0),
55e3c170 1186 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
c4310def 1187 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
55e3c170 1188 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
c4310def 1189 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
55e3c170 1190 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
a89a70a8
DCS
1191 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1192 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
c4310def
RS
1193 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1194 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
a89a70a8 1195 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
c4310def 1196 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
a89a70a8
DCS
1197};
1198
92f5df0d
MR
1199/*
1200 * *Must* be sorted by offset ranges! See intel_fw_table_check().
1201 *
1202 * Note that the spec lists several reserved/unused ranges that don't
1203 * actually contain any registers. In the table below we'll combine those
1204 * reserved ranges with either the preceding or following range to keep the
1205 * table small and lookups fast.
1206 */
cf82d9dd 1207static const struct intel_forcewake_range __gen12_fw_ranges[] = {
92f5df0d
MR
1208 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1209 0x0 - 0xaff: reserved
1210 0xb00 - 0x1fff: always on */
cf82d9dd 1211 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
92f5df0d
MR
1212 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1213 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1214 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
cf82d9dd 1215 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
92f5df0d
MR
1216 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1217 0x4000 - 0x48ff: gt
1218 0x4900 - 0x51ff: reserved */
1219 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1220 0x5200 - 0x53ff: render
1221 0x5400 - 0x54ff: reserved
1222 0x5500 - 0x7fff: render */
55e3c170 1223 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
cf82d9dd 1224 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
92f5df0d
MR
1225 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1226 0x8160 - 0x817f: reserved
1227 0x8180 - 0x81ff: always on */
1228 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
cf82d9dd 1229 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
92f5df0d
MR
1230 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1231 0x8500 - 0x87ff: gt
1232 0x8800 - 0x8fff: reserved
1233 0x9000 - 0x947f: gt
1234 0x9480 - 0x94cf: reserved */
1235 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1236 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1237 0x9560 - 0x95ff: always on
1238 0x9600 - 0x97ff: reserved */
55e3c170 1239 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
92f5df0d
MR
1240 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1241 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1242 0xb400 - 0xbf7f: gt
1243 0xb480 - 0xbfff: reserved
1244 0xc000 - 0xcfff: gt */
1245 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1246 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1247 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1248 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1249 0xdc00 - 0xddff: render
1250 0xde00 - 0xde7f: reserved
1251 0xde80 - 0xe8ff: render
1252 0xe900 - 0xefff: reserved */
1253 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1254 0xf000 - 0xffff: gt
1255 0x10000 - 0x147ff: reserved */
1256 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1257 0x14800 - 0x14fff: render
1258 0x15000 - 0x16dff: reserved
1259 0x16e00 - 0x1bfff: render
1260 0x1c000 - 0x1ffff: reserved */
1261 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1262 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1263 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1264 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1265 0x24000 - 0x2407f: always on
1266 0x24080 - 0x2417f: reserved */
1267 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1268 0x24180 - 0x241ff: gt
1269 0x24200 - 0x249ff: reserved */
1270 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1271 0x24a00 - 0x24a7f: render
1272 0x24a80 - 0x251ff: reserved */
1273 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1274 0x25200 - 0x252ff: gt
1275 0x25300 - 0x255ff: reserved */
1276 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1277 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1278 0x25680 - 0x256ff: VD2
1279 0x25700 - 0x259ff: reserved */
1280 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1281 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1282 0x25a80 - 0x25aff: VD2
1283 0x25b00 - 0x2ffff: reserved */
1284 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
cf82d9dd 1285 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
92f5df0d
MR
1286 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1287 0x1c0000 - 0x1c2bff: VD0
1288 0x1c2c00 - 0x1c2cff: reserved
1289 0x1c2d00 - 0x1c2dff: VD0
1290 0x1c2e00 - 0x1c3eff: reserved
1291 0x1c3f00 - 0x1c3fff: VD0 */
1292 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1293 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1294 0x1c8000 - 0x1ca0ff: VE0
1295 0x1ca100 - 0x1cbeff: reserved
1296 0x1cbf00 - 0x1cbfff: VE0 */
1297 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1298 0x1cc000 - 0x1ccfff: VD0
1299 0x1cd000 - 0x1cffff: reserved */
1300 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1301 0x1d0000 - 0x1d2bff: VD2
1302 0x1d2c00 - 0x1d2cff: reserved
1303 0x1d2d00 - 0x1d2dff: VD2
1304 0x1d2e00 - 0x1d3eff: reserved
1305 0x1d3f00 - 0x1d3fff: VD2 */
cf82d9dd
MT
1306};
1307
e0531636
MR
1308/*
1309 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
1310 * switching it from the GT domain to the render domain.
1311 *
1312 * *Must* be sorted by offset ranges! See intel_fw_table_check().
1313 */
1314#define XEHP_FWRANGES(FW_RANGE_D800) \
1315 GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
1316 0x0 - 0xaff: reserved \
1317 0xb00 - 0x1fff: always on */ \
1318 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
1319 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
1320 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
1321 0x4b00 - 0x4fff: reserved \
1322 0x5000 - 0x51ff: always on */ \
1323 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
1324 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
1325 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
1326 GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
1327 0x8160 - 0x817f: reserved \
1328 0x8180 - 0x81ff: always on */ \
1329 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
1330 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
1331 GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
1332 0x8500 - 0x87ff: gt \
1333 0x8800 - 0x8c7f: reserved \
1334 0x8c80 - 0x8cff: gt (DG2 only) */ \
1335 GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
1336 0x8d00 - 0x8dff: render (DG2 only) \
1337 0x8e00 - 0x8fff: reserved */ \
1338 GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
1339 0x9000 - 0x947f: gt \
1340 0x9480 - 0x94cf: reserved */ \
1341 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
1342 GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
1343 0x9560 - 0x95ff: always on \
1344 0x9600 - 0x967f: reserved */ \
1345 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
1346 0x9680 - 0x96ff: render (DG2 only) \
1347 0x9700 - 0x97ff: reserved */ \
1348 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
1349 0x9800 - 0xb4ff: gt \
1350 0xb500 - 0xbfff: reserved \
1351 0xc000 - 0xcfff: gt */ \
1352 GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
1353 GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
1354 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
1355 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
1356 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
1357 0xdd00 - 0xddff: gt \
1358 0xde00 - 0xde7f: reserved */ \
1359 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
1360 0xde80 - 0xdfff: render \
1361 0xe000 - 0xe0ff: reserved \
1362 0xe100 - 0xe8ff: render */ \
1363 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
1364 0xe900 - 0xe9ff: gt \
1365 0xea00 - 0xefff: reserved \
1366 0xf000 - 0xffff: gt */ \
1367 GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
1368 0x10000 - 0x11fff: reserved \
1369 0x12000 - 0x127ff: always on \
1370 0x12800 - 0x12fff: reserved */ \
1371 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
1372 GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1373 0x13200 - 0x133ff: VD2 (DG2 only) \
1374 0x13400 - 0x13fff: reserved */ \
1375 GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
1376 GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
1377 GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
1378 GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
1379 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
1380 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
1381 0x15000 - 0x15fff: gt (DG2 only) \
1382 0x16000 - 0x16dff: reserved */ \
1383 GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
1384 GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1385 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
1386 0x21000 - 0x21fff: reserved */ \
1387 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
1388 GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
1389 0x24000 - 0x2407f: always on \
1390 0x24080 - 0x2417f: reserved */ \
1391 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
1392 0x24180 - 0x241ff: gt \
1393 0x24200 - 0x249ff: reserved */ \
1394 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
1395 0x24a00 - 0x24a7f: render \
1396 0x24a80 - 0x251ff: reserved */ \
1397 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
1398 0x25200 - 0x252ff: gt \
1399 0x25300 - 0x25fff: reserved */ \
1400 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
1401 0x26000 - 0x27fff: render \
1402 0x28000 - 0x29fff: reserved \
1403 0x2a000 - 0x2ffff: undocumented */ \
1404 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
1405 GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
1406 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1407 0x1c0000 - 0x1c2bff: VD0 \
1408 0x1c2c00 - 0x1c2cff: reserved \
1409 0x1c2d00 - 0x1c2dff: VD0 \
1410 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
1411 0x1c3f00 - 0x1c3fff: VD0 */ \
1412 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
1413 0x1c4000 - 0x1c6bff: VD1 \
1414 0x1c6c00 - 0x1c6cff: reserved \
1415 0x1c6d00 - 0x1c6dff: VD1 \
1416 0x1c6e00 - 0x1c7fff: reserved */ \
1417 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
1418 0x1c8000 - 0x1ca0ff: VE0 \
1419 0x1ca100 - 0x1cbfff: reserved */ \
1420 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
1421 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
1422 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
1423 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
1424 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1425 0x1d0000 - 0x1d2bff: VD2 \
1426 0x1d2c00 - 0x1d2cff: reserved \
1427 0x1d2d00 - 0x1d2dff: VD2 \
1428 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
1429 0x1d3e00 - 0x1d3eff: reserved \
1430 0x1d3f00 - 0x1d3fff: VD2 */ \
1431 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
1432 0x1d4000 - 0x1d6bff: VD3 \
1433 0x1d6c00 - 0x1d6cff: reserved \
1434 0x1d6d00 - 0x1d6dff: VD3 \
1435 0x1d6e00 - 0x1d7fff: reserved */ \
1436 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
1437 0x1d8000 - 0x1da0ff: VE1 \
1438 0x1da100 - 0x1dffff: reserved */ \
1439 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
1440 0x1e0000 - 0x1e2bff: VD4 \
1441 0x1e2c00 - 0x1e2cff: reserved \
1442 0x1e2d00 - 0x1e2dff: VD4 \
1443 0x1e2e00 - 0x1e3eff: reserved \
1444 0x1e3f00 - 0x1e3fff: VD4 */ \
1445 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
1446 0x1e4000 - 0x1e6bff: VD5 \
1447 0x1e6c00 - 0x1e6cff: reserved \
1448 0x1e6d00 - 0x1e6dff: VD5 \
1449 0x1e6e00 - 0x1e7fff: reserved */ \
1450 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
1451 0x1e8000 - 0x1ea0ff: VE2 \
1452 0x1ea100 - 0x1effff: reserved */ \
1453 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
1454 0x1f0000 - 0x1f2bff: VD6 \
1455 0x1f2c00 - 0x1f2cff: reserved \
1456 0x1f2d00 - 0x1f2dff: VD6 \
1457 0x1f2e00 - 0x1f3eff: reserved \
1458 0x1f3f00 - 0x1f3fff: VD6 */ \
1459 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
1460 0x1f4000 - 0x1f6bff: VD7 \
1461 0x1f6c00 - 0x1f6cff: reserved \
1462 0x1f6d00 - 0x1f6dff: VD7 \
1463 0x1f6e00 - 0x1f7fff: reserved */ \
bfac1e2b 1464 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
e0531636
MR
1465
1466static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1467 XEHP_FWRANGES(FORCEWAKE_GT)
1468};
1469
1470static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1471 XEHP_FWRANGES(FORCEWAKE_RENDER)
bfac1e2b
MR
1472};
1473
907b28c5 1474static void
6ebc9692 1475ilk_dummy_write(struct intel_uncore *uncore)
907b28c5
CW
1476{
1477 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1478 * the chip from rc6 before touching it for real. MI_MODE is masked,
1479 * hence harmless to write 0 into. */
6cc5ca76 1480 __raw_uncore_write32(uncore, MI_MODE, 0);
907b28c5
CW
1481}
1482
1483static void
2cf7bf6f 1484__unclaimed_reg_debug(struct intel_uncore *uncore,
9c053501
MK
1485 const i915_reg_t reg,
1486 const bool read,
1487 const bool before)
907b28c5 1488{
a9f236d1
PB
1489 if (drm_WARN(&uncore->i915->drm,
1490 check_for_unclaimed_mmio(uncore) && !before,
1491 "Unclaimed %s register 0x%x\n",
1492 read ? "read from" : "write to",
1493 i915_mmio_reg_offset(reg)))
4f044a88 1494 /* Only report the first N failures */
8a25c4be 1495 uncore->i915->params.mmio_debug--;
907b28c5
CW
1496}
1497
9c053501 1498static inline void
2cf7bf6f 1499unclaimed_reg_debug(struct intel_uncore *uncore,
9c053501
MK
1500 const i915_reg_t reg,
1501 const bool read,
1502 const bool before)
1503{
8a25c4be 1504 if (likely(!uncore->i915->params.mmio_debug))
9c053501
MK
1505 return;
1506
0a9b2630
DCS
1507 /* interrupts are disabled and re-enabled around uncore->lock usage */
1508 lockdep_assert_held(&uncore->lock);
1509
1510 if (before)
1511 spin_lock(&uncore->debug->lock);
1512
2cf7bf6f 1513 __unclaimed_reg_debug(uncore, reg, read, before);
0a9b2630
DCS
1514
1515 if (!before)
1516 spin_unlock(&uncore->debug->lock);
9c053501
MK
1517}
1518
0e65ce24
CW
1519#define __vgpu_read(x) \
1520static u##x \
1521vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1522 u##x val = __raw_uncore_read##x(uncore, reg); \
1523 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1524 return val; \
1525}
1526__vgpu_read(8)
1527__vgpu_read(16)
1528__vgpu_read(32)
1529__vgpu_read(64)
1530
51f67885 1531#define GEN2_READ_HEADER(x) \
5d738795 1532 u##x val = 0; \
87b391b9 1533 assert_rpm_wakelock_held(uncore->rpm);
5d738795 1534
51f67885 1535#define GEN2_READ_FOOTER \
5d738795
BW
1536 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1537 return val
1538
51f67885 1539#define __gen2_read(x) \
0b274481 1540static u##x \
a2b4abfc 1541gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
51f67885 1542 GEN2_READ_HEADER(x); \
6cc5ca76 1543 val = __raw_uncore_read##x(uncore, reg); \
51f67885 1544 GEN2_READ_FOOTER; \
3967018e
BW
1545}
1546
1547#define __gen5_read(x) \
1548static u##x \
a2b4abfc 1549gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
51f67885 1550 GEN2_READ_HEADER(x); \
6ebc9692 1551 ilk_dummy_write(uncore); \
6cc5ca76 1552 val = __raw_uncore_read##x(uncore, reg); \
51f67885 1553 GEN2_READ_FOOTER; \
3967018e
BW
1554}
1555
51f67885
CW
1556__gen5_read(8)
1557__gen5_read(16)
1558__gen5_read(32)
1559__gen5_read(64)
1560__gen2_read(8)
1561__gen2_read(16)
1562__gen2_read(32)
1563__gen2_read(64)
1564
1565#undef __gen5_read
1566#undef __gen2_read
1567
1568#undef GEN2_READ_FOOTER
1569#undef GEN2_READ_HEADER
1570
1571#define GEN6_READ_HEADER(x) \
f0f59a00 1572 u32 offset = i915_mmio_reg_offset(reg); \
51f67885
CW
1573 unsigned long irqflags; \
1574 u##x val = 0; \
87b391b9 1575 assert_rpm_wakelock_held(uncore->rpm); \
272c7e52 1576 spin_lock_irqsave(&uncore->lock, irqflags); \
2cf7bf6f 1577 unclaimed_reg_debug(uncore, reg, true, true)
51f67885
CW
1578
1579#define GEN6_READ_FOOTER \
2cf7bf6f 1580 unclaimed_reg_debug(uncore, reg, true, false); \
272c7e52 1581 spin_unlock_irqrestore(&uncore->lock, irqflags); \
51f67885
CW
1582 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1583 return val
1584
f568eeee 1585static noinline void ___force_wake_auto(struct intel_uncore *uncore,
c521b0c8 1586 enum forcewake_domains fw_domains)
b2cff0db
CW
1587{
1588 struct intel_uncore_forcewake_domain *domain;
d2dc94bc
CW
1589 unsigned int tmp;
1590
f568eeee 1591 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
b2cff0db 1592
f568eeee 1593 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
c521b0c8
TU
1594 fw_domain_arm_timer(domain);
1595
5716c8c6 1596 fw_domains_get(uncore, fw_domains);
c521b0c8
TU
1597}
1598
f568eeee 1599static inline void __force_wake_auto(struct intel_uncore *uncore,
c521b0c8
TU
1600 enum forcewake_domains fw_domains)
1601{
77adbd8f 1602 GEM_BUG_ON(!fw_domains);
b2cff0db 1603
003342a5 1604 /* Turn on all requested but inactive supported forcewake domains. */
f568eeee
DCS
1605 fw_domains &= uncore->fw_domains;
1606 fw_domains &= ~uncore->fw_domains_active;
b2cff0db 1607
c521b0c8 1608 if (fw_domains)
f568eeee 1609 ___force_wake_auto(uncore, fw_domains);
b2cff0db
CW
1610}
1611
e5b32ae3 1612#define __gen_fwtable_read(x) \
3967018e 1613static u##x \
e5b32ae3
MR
1614fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1615{ \
6863b76c 1616 enum forcewake_domains fw_engine; \
51f67885 1617 GEN6_READ_HEADER(x); \
e5b32ae3 1618 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
6a42d0f4 1619 if (fw_engine) \
272c7e52 1620 __force_wake_auto(uncore, fw_engine); \
6cc5ca76 1621 val = __raw_uncore_read##x(uncore, reg); \
51f67885 1622 GEN6_READ_FOOTER; \
940aece4 1623}
ccb2acea 1624
e5b32ae3
MR
1625static enum forcewake_domains
1626fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1627 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1628}
ccb2acea 1629
e5b32ae3
MR
1630__gen_fwtable_read(8)
1631__gen_fwtable_read(16)
1632__gen_fwtable_read(32)
1633__gen_fwtable_read(64)
ccb2acea 1634
e5b32ae3 1635#undef __gen_fwtable_read
51f67885
CW
1636#undef GEN6_READ_FOOTER
1637#undef GEN6_READ_HEADER
5d738795 1638
51f67885 1639#define GEN2_WRITE_HEADER \
5d738795 1640 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
87b391b9 1641 assert_rpm_wakelock_held(uncore->rpm); \
907b28c5 1642
51f67885 1643#define GEN2_WRITE_FOOTER
0d965301 1644
51f67885 1645#define __gen2_write(x) \
0b274481 1646static void \
a2b4abfc 1647gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1648 GEN2_WRITE_HEADER; \
6cc5ca76 1649 __raw_uncore_write##x(uncore, reg, val); \
51f67885 1650 GEN2_WRITE_FOOTER; \
4032ef43
BW
1651}
1652
1653#define __gen5_write(x) \
1654static void \
a2b4abfc 1655gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1656 GEN2_WRITE_HEADER; \
6ebc9692 1657 ilk_dummy_write(uncore); \
6cc5ca76 1658 __raw_uncore_write##x(uncore, reg, val); \
51f67885 1659 GEN2_WRITE_FOOTER; \
4032ef43
BW
1660}
1661
/* Instantiate the gen5 and gen2 write families, then drop the helpers. */
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)

__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
1674
/*
 * Prologue/epilogue for gen6+ MMIO writes: declares 'offset', traces the
 * access, asserts a runtime-pm wakeref, takes the uncore spinlock and
 * brackets the write with unclaimed-register debug checks. The header
 * deliberately ends without a semicolon (the user supplies it).
 */
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)
51f67885 1686
4032ef43
BW
/*
 * Generate a gen6/gen7 MMIO write function of width x bits. No shadow
 * registers here; instead the GT FIFO must be drained before writes to
 * forcewake-protected offsets to avoid dropped writes.
 */
#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
4032ef43 1699
/*
 * Generate a forcewake-table-aware MMIO write function of width x bits.
 * Looks up the required write domains (shadowed registers need none),
 * auto-wakes them and performs the raw write under the uncore lock.
 */
#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
4032ef43 1711
aef02736
MR
1712static enum forcewake_domains
1713fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1714{
1715 return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
1716}
ccb2acea 1717
/* Instantiate fwtable_write{8,16,32}() and drop the helper macros. */
__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
907b28c5 1725
/*
 * Generate a write function for vGPU guests: the hypervisor mediates the
 * access, so no forcewake, locking or unclaimed-reg debug is needed —
 * only tracing plus the raw write.
 */
#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
1735
/* Install the raw (no fw-domain lookup) write vfuncs for flavour x. */
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

/* Install the raw read vfuncs for flavour x (reads also have a 64-bit op). */
#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

/* Raw write vfuncs plus the matching write fw-domain lookup. */
#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

/* Raw read vfuncs plus the matching read fw-domain lookup. */
#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)
05a2fb15 1762
/*
 * Allocate and register one forcewake domain on @uncore.
 *
 * @domain_id: slot in uncore->fw_domain[] (must be empty)
 * @reg_set:   register written to request/release the forcewake
 * @reg_ack:   register polled for the hardware acknowledgement
 *
 * Returns 0 on success, -ENOMEM on allocation (or injected probe) failure.
 */
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	/* Fault-injection point for probe error-path testing. */
	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	/* Cache the ioremapped addresses so domain ops avoid offset math. */
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	/* d->mask relies on each FORCEWAKE_* bit matching its domain id. */
	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));

	d->mask = BIT(domain_id);

	/* Timer that releases the domain after a short idle grace period. */
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}
1819
f7de5027 1820static void fw_domain_fini(struct intel_uncore *uncore,
26376a7e
OM
1821 enum forcewake_domain_id domain_id)
1822{
1823 struct intel_uncore_forcewake_domain *d;
1824
f833cdb0 1825 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
26376a7e 1826
f833cdb0
DCS
1827 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1828 if (!d)
1829 return;
26376a7e 1830
f833cdb0 1831 uncore->fw_domains &= ~BIT(domain_id);
a9f236d1
PB
1832 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
1833 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
f833cdb0
DCS
1834 kfree(d);
1835}
26376a7e 1836
f833cdb0
DCS
1837static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
1838{
1839 struct intel_uncore_forcewake_domain *d;
1840 int tmp;
1841
1842 for_each_fw_domain(d, uncore, tmp)
1843 fw_domain_fini(uncore, d->id);
26376a7e
OM
1844}
1845
/* Forcewake-get strategies, selected per platform in fw_domains_init. */

/* Gen9+: retry with a fallback path if the ack doesn't arrive. */
static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

/* VLV/CHV: plain set-and-wait-for-ack. */
static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

/* Gen6-Gen8: additionally poll the thread status register. */
static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};
1857
/*
 * Create the per-platform set of forcewake domains and pick the matching
 * forcewake-get strategy. On any failure all partially-created domains
 * are torn down again. Returns 0 on success or a negative errno.
 */
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

/*
 * Sticky-error wrapper: once one __fw_domain_init() fails, the remaining
 * calls in the chain become no-ops and the first error is kept in 'ret'.
 */
#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		/* One domain per possible video decode/enhance engine. */
		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			/* MT unusable: rebuild the render domain on the
			 * legacy single-threaded FORCEWAKE registers. */
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}
1976
/*
 * Hook the platform's forcewake range table up to @uncore. Wrapped in
 * do { } while (0) so the macro expands to a single statement and stays
 * safe inside an unbraced if/else, matching the other ASSIGN_* macros
 * in this file. ARRAY_SIZE() requires @d to be a real array.
 */
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
do { \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
} while (0)

/* Hook up the platform's shadowed (no-forcewake-needed) register table. */
#define ASSIGN_SHADOW_TABLE(uncore, d) \
do { \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
} while (0)
1989
/*
 * PMIC i2c bus access notifier (Valleyview-class SoCs): grab all
 * forcewake domains while the punit's shared PMIC bus is in use, drop
 * them again when it is released. Always returns NOTIFY_OK.
 */
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access to the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
2022
/*
 * Map the GPU register BAR into uncore->regs.
 * Returns 0 on success, -EIO if the ioremap fails.
 */
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	int mmio_bar;
	int mmio_size;

	/* Gen2 exposes the registers on BAR 1, everything newer on BAR 0. */
	mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 * For dgfx chips register range is expanded to 4MB.
	 */
	if (GRAPHICS_VER(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}
2055
2056static void uncore_mmio_cleanup(struct intel_uncore *uncore)
2057{
8ff5446a 2058 struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
25286aac
DCS
2059
2060 pci_iounmap(pdev, uncore->regs);
2061}
2062
01385758
DCS
2063void intel_uncore_init_early(struct intel_uncore *uncore,
2064 struct drm_i915_private *i915)
6cbe8830
DCS
2065{
2066 spin_lock_init(&uncore->lock);
01385758
DCS
2067 uncore->i915 = i915;
2068 uncore->rpm = &i915->runtime_pm;
0a9b2630 2069 uncore->debug = &i915->mmio_debug;
6cbe8830 2070}
25286aac 2071
2e81bc61 2072static void uncore_raw_init(struct intel_uncore *uncore)
f9b3927a 2073{
2e81bc61 2074 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
25286aac 2075
0e65ce24
CW
2076 if (intel_vgpu_active(uncore->i915)) {
2077 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2078 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
651e7d48 2079 } else if (GRAPHICS_VER(uncore->i915) == 5) {
2e81bc61
DCS
2080 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2081 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2082 } else {
2083 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2084 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2085 }
2086}
f7de5027 2087
/*
 * Full forcewake bring-up: create the domains, sanitize hardware state,
 * install the fwtable read accessors and the per-platform write path
 * (forcewake range table, optional shadow table, write vfuncs), then
 * register the PMIC bus access notifier. Returns 0 or a negative errno.
 */
static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	/* Reads use the range-table path on every forcewake platform. */
	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	/* Ordered newest-first; each branch picks table(s) + write flavour. */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}
2143
/*
 * Main MMIO/uncore bring-up: map the register BAR, verify dGPU local
 * memory was initialized by firmware, then install either the raw or the
 * forcewake-aware accessor set and the platform capability flags.
 * Returns 0 on success or a negative errno (BAR is unmapped on failure).
 */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	/* Gen6+ has forcewake, except when running as a vGPU guest. */
	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* Make sure fw funcs are set if and only if we have fw. */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}
2201
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	/* Per-engine domains only exist on gen11+ forcewake platforms. */
	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}
2253
/*
 * Tear down everything intel_uncore_init_mmio() set up. With forcewake:
 * unregister the PMIC notifier (under the punit lock so no bus access
 * races the removal), reset forcewake state and free the domains, then
 * unmap the register BAR.
 */
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}
2267
/*
 * Registers userspace may read through i915_reg_read_ioctl(). Each entry
 * gives the low/high dwords, the graphics-version range it is valid for,
 * and the access size in bytes. Currently only the render ring timestamp.
 */
static const struct reg_whitelist {
	i915_reg_t offset_ldw;	/* low dword */
	i915_reg_t offset_udw;	/* high dword (64-bit registers) */
	u8 min_graphics_ver;
	u8 max_graphics_ver;
	u8 size;		/* access width in bytes, power of two */
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.min_graphics_ver = 4,
	.max_graphics_ver = 12,
	.size = 8
} };
907b28c5
CW
2281
/*
 * DRM_IOCTL_I915_REG_READ: let userspace read a whitelisted register.
 * The low bits of reg->offset encode access flags (e.g. the 8B
 * workaround); the entry's size determines which uncore read is used.
 * Returns 0 on success, -EINVAL for non-whitelisted offsets or
 * size/flag combinations.
 */
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	/* Linear scan of the (tiny) whitelist for a matching entry. */
	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		/* Compare with the flag bits masked off the offset. */
		if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			/* 64-bit read as two 32-bit halves (hw workaround). */
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
2337
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
/* Condition shared by both wait loops; also captures the last value read. */
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	/* Tight atomic poll first, then (if allowed) the sleeping wait. */
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
2393
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	/* Fast phase: hold forcewake and poll atomically under the lock. */
	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	/* Slow phase: sleep-wait without holding forcewake for the duration. */
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
2452
2cf7bf6f 2453bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
907b28c5 2454{
0a9b2630
DCS
2455 bool ret;
2456
2457 spin_lock_irq(&uncore->debug->lock);
2458 ret = check_for_unclaimed_mmio(uncore);
2459 spin_unlock_irq(&uncore->debug->lock);
2460
2461 return ret;
907b28c5 2462}
75714940 2463
/*
 * Check for an unclaimed MMIO access and, the first time one is seen,
 * bump i915.mmio_debug to enable oneshot per-access reporting. The check
 * self-disarms via debug->unclaimed_mmio_check once exhausted. Returns
 * true if an unclaimed access was detected this call.
 */
bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	/* Detection already used up (or disabled) — nothing to do. */
	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}
3756685a 2491
3756685a
TU
2492/**
2493 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2494 * a register
4319382e 2495 * @uncore: pointer to struct intel_uncore
3756685a
TU
2496 * @reg: register in question
2497 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2498 *
2499 * Returns a set of forcewake domains required to be taken with for example
2500 * intel_uncore_forcewake_get for the specified register to be accessible in the
2501 * specified mode (read, write or read/write) with raw mmio accessors.
2502 *
2503 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2504 * callers to do FIFO management on their own or risk losing writes.
2505 */
2506enum forcewake_domains
4319382e 2507intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
3756685a
TU
2508 i915_reg_t reg, unsigned int op)
2509{
2510 enum forcewake_domains fw_domains = 0;
2511
a9f236d1 2512 drm_WARN_ON(&uncore->i915->drm, !op);
3756685a 2513
4319382e 2514 if (!intel_uncore_has_forcewake(uncore))
895833bd
TU
2515 return 0;
2516
3756685a 2517 if (op & FW_REG_READ)
ccb2acea 2518 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
3756685a
TU
2519
2520 if (op & FW_REG_WRITE)
ccb2acea
DCS
2521 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2522
a9f236d1 2523 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
3756685a
TU
2524
2525 return fw_domains;
2526}
26e7a2a1 2527
932641f0
DCS
2528u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
2529 i915_reg_t reg,
2530 int slice, int subslice)
2531{
2532 u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
2533
2534 lockdep_assert_held(&uncore->lock);
2535
2536 if (GRAPHICS_VER(uncore->i915) >= 11) {
2537 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
2538 mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
2539 } else {
2540 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
2541 mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
2542 }
2543
2544 old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
2545
2546 mcr &= ~mcr_mask;
2547 mcr |= mcr_ss;
2548 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
2549
2550 val = intel_uncore_read_fw(uncore, reg);
2551
2552 mcr &= ~mcr_mask;
2553 mcr |= old_mcr & mcr_mask;
2554
2555 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
2556
2557 return val;
2558}
2559
2560u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
2561 i915_reg_t reg, int slice, int subslice)
2562{
2563 enum forcewake_domains fw_domains;
2564 u32 val;
2565
2566 fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
2567 FW_REG_READ);
2568 fw_domains |= intel_uncore_forcewake_for_reg(uncore,
2569 GEN8_MCR_SELECTOR,
2570 FW_REG_READ | FW_REG_WRITE);
2571
2572 spin_lock_irq(&uncore->lock);
2573 intel_uncore_forcewake_get__locked(uncore, fw_domains);
2574
2575 val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);
2576
2577 intel_uncore_forcewake_put__locked(uncore, fw_domains);
2578 spin_unlock_irq(&uncore->lock);
2579
2580 return val;
2581}
2582
26e7a2a1 2583#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0757ac8f 2584#include "selftests/mock_uncore.c"
26e7a2a1
CW
2585#include "selftests/intel_uncore.c"
2586#endif