drivers/gpu/drm/i915/intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "gt/intel_lrc_reg.h" /* for shadow reg list */

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}
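
/*
 * Illustrative sketch (not driver code): the pair above nests via
 * suspend_count, so only the outermost suspend saves and disables the
 * unclaimed-mmio check, and only the matching outermost resume restores
 * it. A hypothetical caller bracketing a debug-free window would look
 * like this:
 */
static void example_mmio_debug_bypass(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock(&mmio_debug->lock);
	mmio_debug_suspend(mmio_debug); /* outermost: saves and disables */
	mmio_debug_suspend(mmio_debug); /* nested: only bumps the count */
	spin_unlock(&mmio_debug->lock);

	/* ... unclaimed-mmio checking is disabled in this window ... */

	spin_lock(&mmio_debug->lock);
	mmio_debug_resume(mmio_debug); /* count still non-zero: no restore */
	mmio_debug_resume(mmio_debug); /* outermost: restores the saved check */
	spin_unlock(&mmio_debug->lock);
}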

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
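
/*
 * A minimal sketch (assumed, simplified types; not driver API) of the
 * delayed-release pattern used by fw_domain_arm_timer(): arming takes an
 * extra wake_count reference and the hrtimer callback drops it ~1 ms
 * later, so bursts of register accesses reuse a still-awake domain
 * instead of toggling forcewake per access. The timer is assumed to have
 * been initialized with example_release() as its callback.
 */
struct example_domain {
	unsigned int wake_count;
	struct hrtimer timer;
};

static enum hrtimer_restart example_release(struct hrtimer *timer)
{
	struct example_domain *d =
		container_of(timer, struct example_domain, timer);

	/* drop the reference taken when the timer was armed */
	d->wake_count--;
	return HRTIMER_NORESTART;
}

static void example_arm(struct example_domain *d)
{
	d->wake_count++; /* keep the domain awake until the timer fires */
	hrtimer_start_range_ns(&d->timer, NSEC_PER_MSEC, NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}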

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR.  When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly.  Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and subsequently the reference should be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
655}
656
d7a133d8
CW
657/**
658 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
3ceea6a1 659 * @uncore: the intel_uncore structure
d7a133d8
CW
660 *
661 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
662 * the GT powerwell and in the process disable our debugging for the
663 * duration of userspace's bypass.
664 */
3ceea6a1 665void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
d7a133d8 666{
f568eeee 667 spin_lock_irq(&uncore->lock);
0a9b2630 668 if (!uncore->user_forcewake_count++) {
3ceea6a1 669 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
0a9b2630
DCS
670 spin_lock(&uncore->debug->lock);
671 mmio_debug_suspend(uncore->debug);
672 spin_unlock(&uncore->debug->lock);
d7a133d8 673 }
f568eeee 674 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
675}
676
677/**
678 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
3ceea6a1 679 * @uncore: the intel_uncore structure
d7a133d8
CW
680 *
681 * This function complements intel_uncore_forcewake_user_get() and releases
682 * the GT powerwell taken on behalf of the userspace bypass.
683 */
3ceea6a1 684void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
d7a133d8 685{
f568eeee 686 spin_lock_irq(&uncore->lock);
0a9b2630
DCS
687 if (!--uncore->user_forcewake_count) {
688 spin_lock(&uncore->debug->lock);
689 mmio_debug_resume(uncore->debug);
690
691 if (check_for_unclaimed_mmio(uncore))
19b5b50f 692 drm_info(&uncore->i915->drm,
d7a133d8 693 "Invalid mmio detected during user access\n");
0a9b2630 694 spin_unlock(&uncore->debug->lock);
d7a133d8 695
3ceea6a1 696 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
d7a133d8 697 }
f568eeee 698 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
699}
700
59bad947 701/**
a6111f7b 702 * intel_uncore_forcewake_get__locked - grab forcewake domain references
3ceea6a1 703 * @uncore: the intel_uncore structure
a6111f7b 704 * @fw_domains: forcewake domains to get reference on
59bad947 705 *
a6111f7b
CW
706 * See intel_uncore_forcewake_get(). This variant places the onus
707 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 708 */
3ceea6a1 709void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
a6111f7b
CW
710 enum forcewake_domains fw_domains)
711{
f568eeee
DCS
712 lockdep_assert_held(&uncore->lock);
713
714 if (!uncore->funcs.force_wake_get)
a6111f7b
CW
715 return;
716
f568eeee 717 __intel_uncore_forcewake_get(uncore, fw_domains);
a6111f7b
CW
718}
719
f568eeee 720static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
a6111f7b 721 enum forcewake_domains fw_domains)
907b28c5 722{
b2cff0db 723 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 724 unsigned int tmp;
907b28c5 725
f568eeee 726 fw_domains &= uncore->fw_domains;
b2cff0db 727
f568eeee 728 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
77adbd8f 729 GEM_BUG_ON(!domain->wake_count);
b2cff0db 730
c9e0c6da
CW
731 if (--domain->wake_count) {
732 domain->active = true;
b2cff0db 733 continue;
c9e0c6da 734 }
b2cff0db 735
03c10f47 736 uncore->funcs.force_wake_put(uncore, domain->mask);
aec347ab 737 }
a6111f7b 738}
dc9fb09c 739
a6111f7b
CW
740/**
741 * intel_uncore_forcewake_put - release a forcewake domain reference
3ceea6a1 742 * @uncore: the intel_uncore structure
a6111f7b
CW
743 * @fw_domains: forcewake domains to put references
744 *
745 * This function drops the device-level forcewakes for specified
746 * domains obtained by intel_uncore_forcewake_get().
747 */
3ceea6a1 748void intel_uncore_forcewake_put(struct intel_uncore *uncore,
a6111f7b
CW
749 enum forcewake_domains fw_domains)
750{
751 unsigned long irqflags;
752
f568eeee 753 if (!uncore->funcs.force_wake_put)
a6111f7b
CW
754 return;
755
f568eeee
DCS
756 spin_lock_irqsave(&uncore->lock, irqflags);
757 __intel_uncore_forcewake_put(uncore, fw_domains);
758 spin_unlock_irqrestore(&uncore->lock, irqflags);
907b28c5
CW
759}
760
032d992d
CW
761/**
762 * intel_uncore_forcewake_flush - flush the delayed release
763 * @uncore: the intel_uncore structure
764 * @fw_domains: forcewake domains to flush
765 */
766void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
767 enum forcewake_domains fw_domains)
768{
769 struct intel_uncore_forcewake_domain *domain;
770 unsigned int tmp;
771
772 if (!uncore->funcs.force_wake_put)
773 return;
774
775 fw_domains &= uncore->fw_domains;
776 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
777 WRITE_ONCE(domain->active, false);
778 if (hrtimer_cancel(&domain->timer))
779 intel_uncore_fw_release_timer(&domain->timer);
780 }
781}
782
a6111f7b
CW
783/**
784 * intel_uncore_forcewake_put__locked - grab forcewake domain references
3ceea6a1 785 * @uncore: the intel_uncore structure
a6111f7b
CW
786 * @fw_domains: forcewake domains to get reference on
787 *
788 * See intel_uncore_forcewake_put(). This variant places the onus
789 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
790 */
3ceea6a1 791void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
a6111f7b
CW
792 enum forcewake_domains fw_domains)
793{
f568eeee
DCS
794 lockdep_assert_held(&uncore->lock);
795
796 if (!uncore->funcs.force_wake_put)
a6111f7b
CW
797 return;
798
f568eeee 799 __intel_uncore_forcewake_put(uncore, fw_domains);
a6111f7b
CW
800}
801
f568eeee 802void assert_forcewakes_inactive(struct intel_uncore *uncore)
e998c40f 803{
f568eeee 804 if (!uncore->funcs.force_wake_get)
e998c40f
PZ
805 return;
806
a9f236d1
PB
807 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
808 "Expected all fw_domains to be inactive, but %08x are still on\n",
809 uncore->fw_domains_active);
67e64564
CW
810}
811
f568eeee 812void assert_forcewakes_active(struct intel_uncore *uncore,
67e64564
CW
813 enum forcewake_domains fw_domains)
814{
b7dc9395
CW
815 struct intel_uncore_forcewake_domain *domain;
816 unsigned int tmp;
817
818 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
819 return;
820
f568eeee 821 if (!uncore->funcs.force_wake_get)
67e64564
CW
822 return;
823
15e7facb
CW
824 spin_lock_irq(&uncore->lock);
825
87b391b9 826 assert_rpm_wakelock_held(uncore->rpm);
67e64564 827
f568eeee 828 fw_domains &= uncore->fw_domains;
a9f236d1
PB
829 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
830 "Expected %08x fw_domains to be active, but %08x are off\n",
831 fw_domains, fw_domains & ~uncore->fw_domains_active);
b7dc9395
CW
832
833 /*
834 * Check that the caller has an explicit wakeref and we don't mistake
835 * it for the auto wakeref.
836 */
b7dc9395 837 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
badf1f27 838 unsigned int actual = READ_ONCE(domain->wake_count);
b7dc9395
CW
839 unsigned int expect = 1;
840
77adbd8f 841 if (uncore->fw_domains_timer & domain->mask)
b7dc9395
CW
842 expect++; /* pending automatic release */
843
a9f236d1
PB
844 if (drm_WARN(&uncore->i915->drm, actual < expect,
845 "Expected domain %d to be held awake by caller, count=%d\n",
846 domain->id, actual))
b7dc9395
CW
847 break;
848 }
15e7facb
CW
849
850 spin_unlock_irq(&uncore->lock);
e998c40f
PZ
851}
852
907b28c5 853/* We give fast paths for the really cool registers */
40181697 854#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
907b28c5 855
272c7e52 856#define __gen6_reg_read_fw_domains(uncore, offset) \
6863b76c
TU
857({ \
858 enum forcewake_domains __fwd; \
859 if (NEEDS_FORCE_WAKE(offset)) \
860 __fwd = FORCEWAKE_RENDER; \
861 else \
862 __fwd = 0; \
863 __fwd; \
864})
865
9480dbf0 866static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 867{
91e630b9
TU
868 if (offset < entry->start)
869 return -1;
870 else if (offset > entry->end)
871 return 1;
872 else
873 return 0;
874}
875
9480dbf0
TU
876/* Copied and "macroized" from lib/bsearch.c */
877#define BSEARCH(key, base, num, cmp) ({ \
878 unsigned int start__ = 0, end__ = (num); \
879 typeof(base) result__ = NULL; \
880 while (start__ < end__) { \
881 unsigned int mid__ = start__ + (end__ - start__) / 2; \
882 int ret__ = (cmp)((key), (base) + mid__); \
883 if (ret__ < 0) { \
884 end__ = mid__; \
885 } else if (ret__ > 0) { \
886 start__ = mid__ + 1; \
887 } else { \
888 result__ = (base) + mid__; \
889 break; \
890 } \
891 } \
892 result__; \
893})
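
/*
 * Illustrative sketch: BSEARCH() works on any sorted array given a
 * three-way comparator called as cmp(key, element_ptr), mirroring
 * lib/bsearch.c. For example (hypothetical helpers, not used below),
 * membership in a sorted array of ints:
 */
static int example_int_cmp(u32 key, const int *elt)
{
	if (key < (u32)*elt)
		return -1;
	else if (key > (u32)*elt)
		return 1;
	else
		return 0;
}

static bool example_contains(const int *sorted, unsigned int num, u32 key)
{
	return BSEARCH(key, sorted, num, example_int_cmp) != NULL;
}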

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)

#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
	GEN6_RPNSWREQ, /* 0xA008 */
	GEN6_RC_VIDEO_FREQ, /* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
	RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
	GEN6_RPNSWREQ, /* 0xA008 */
	GEN6_RC_VIDEO_FREQ, /* 0xA00C */
	RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
	RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
	RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
	RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
	RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
	RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
	RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
	RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen12_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
	RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
	GEN6_RPNSWREQ, /* 0xA008 */
	GEN6_RC_VIDEO_FREQ, /* 0xA00C */
	RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
	RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
	RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
	RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
	RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
	RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
	RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
	RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t xehp_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
	RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
	GEN6_RPNSWREQ, /* 0xA008 */
	GEN6_RC_VIDEO_FREQ, /* 0xA00C */
	RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
	RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
	RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
	RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
	RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
	RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
	RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
	RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
	RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
	RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
	RING_TAIL(XEHP_BSD5_RING_BASE), /* 0x1E0000 (base) */
	RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE), /* 0x1E0550 */
	RING_TAIL(XEHP_BSD6_RING_BASE), /* 0x1E4000 (base) */
	RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE), /* 0x1E4550 */
	RING_TAIL(XEHP_VEBOX3_RING_BASE), /* 0x1E8000 (base) */
	RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE), /* 0x1E8550 */
	RING_TAIL(XEHP_BSD7_RING_BASE), /* 0x1F0000 (base) */
	RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE), /* 0x1F0550 */
	RING_TAIL(XEHP_BSD8_RING_BASE), /* 0x1F4000 (base) */
	RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE), /* 0x1F4550 */
	RING_TAIL(XEHP_VEBOX4_RING_BASE), /* 0x1F8000 (base) */
	RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE), /* 0x1F8550 */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_X_shadowed(x) \
static bool is_##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_X_shadowed(gen8)
__is_X_shadowed(gen11)
__is_X_shadowed(gen12)
__is_X_shadowed(xehp)
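
/*
 * For reference, the generator above expands, for x = gen8, to the
 * following, binary-searching the sorted shadowed-register table:
 *
 *	static bool is_gen8_shadowed(u32 offset)
 *	{
 *		const i915_reg_t *regs = gen8_shadowed_regs;
 *		return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
 *			       mmio_reg_cmp);
 *	}
 */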
a89a70a8 1064
ccb2acea
DCS
1065static enum forcewake_domains
1066gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1067{
1068 return FORCEWAKE_RENDER;
1069}
1070
272c7e52 1071#define __gen8_reg_write_fw_domains(uncore, offset) \
6863b76c
TU
1072({ \
1073 enum forcewake_domains __fwd; \
1074 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
1075 __fwd = FORCEWAKE_RENDER; \
1076 else \
1077 __fwd = 0; \
1078 __fwd; \
1079})
1080
b0081239 1081/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c
TU
1082static const struct intel_forcewake_range __chv_fw_ranges[] = {
1083 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 1084 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1085 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 1086 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1087 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 1088 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1089 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
b0081239
TU
1090 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1091 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 1092 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
b0081239
TU
1093 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1094 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c
TU
1095 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1096 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1097 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1098 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 1099};
38fb6a40 1100
272c7e52 1101#define __fwtable_reg_write_fw_domains(uncore, offset) \
6863b76c
TU
1102({ \
1103 enum forcewake_domains __fwd = 0; \
0dd356bb 1104 if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
272c7e52 1105 __fwd = find_fw_domain(uncore, offset); \
6863b76c
TU
1106 __fwd; \
1107})
1108
272c7e52 1109#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
a89a70a8
DCS
1110({ \
1111 enum forcewake_domains __fwd = 0; \
c9f8d187
MK
1112 const u32 __offset = (offset); \
1113 if (!is_gen11_shadowed(__offset)) \
1114 __fwd = find_fw_domain(uncore, __offset); \
a89a70a8
DCS
1115 __fwd; \
1116})
1117
cf82d9dd
MT
1118#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
1119({ \
1120 enum forcewake_domains __fwd = 0; \
1121 const u32 __offset = (offset); \
1122 if (!is_gen12_shadowed(__offset)) \
1123 __fwd = find_fw_domain(uncore, __offset); \
1124 __fwd; \
1125})
1126
bfac1e2b
MR
1127#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \
1128({ \
1129 enum forcewake_domains __fwd = 0; \
1130 const u32 __offset = (offset); \
1131 if (!is_xehp_shadowed(__offset)) \
1132 __fwd = find_fw_domain(uncore, __offset); \
1133 __fwd; \
1134})
1135
b0081239 1136/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
9fc1117c 1137static const struct intel_forcewake_range __gen9_fw_ranges[] = {
55e3c170 1138 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
9fc1117c
TU
1139 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1140 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1141 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
9fc1117c 1142 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1143 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
9fc1117c 1144 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1145 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
b0081239 1146 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 1147 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1148 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
9fc1117c 1149 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1150 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
b0081239 1151 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
55e3c170 1152 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
9fc1117c 1153 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1154 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
9fc1117c 1155 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
55e3c170 1156 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
b0081239 1157 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1158 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
9fc1117c 1159 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
55e3c170 1160 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
b0081239 1161 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1162 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
9fc1117c 1163 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
55e3c170 1164 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
9fc1117c 1165 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
55e3c170 1166 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
b0081239 1167 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
55e3c170 1168 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
9fc1117c
TU
1169 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1170};
6863b76c 1171
a89a70a8
DCS
1172/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1173static const struct intel_forcewake_range __gen11_fw_ranges[] = {
c4310def 1174 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
a89a70a8 1175 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1176 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
a89a70a8 1177 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1178 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
a89a70a8 1179 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1180 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
a89a70a8 1181 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1182 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
a89a70a8 1183 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1184 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
c4310def 1185 GEN_FW_RANGE(0x8800, 0x8bff, 0),
a89a70a8 1186 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1187 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
c4310def
RS
1188 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1189 GEN_FW_RANGE(0x9560, 0x95ff, 0),
55e3c170 1190 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
a89a70a8 1191 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1192 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
c9f8d187 1193 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1194 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
c9f8d187 1195 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
55e3c170 1196 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
c4310def 1197 GEN_FW_RANGE(0x24000, 0x2407f, 0),
55e3c170 1198 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
c4310def 1199 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
55e3c170 1200 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
c4310def 1201 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
55e3c170 1202 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
a89a70a8
DCS
1203 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1204 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
c4310def
RS
1205 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1206 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
a89a70a8 1207 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
c4310def 1208 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
a89a70a8
DCS
1209};
1210
92f5df0d
MR
1211/*
1212 * *Must* be sorted by offset ranges! See intel_fw_table_check().
1213 *
1214 * Note that the spec lists several reserved/unused ranges that don't
1215 * actually contain any registers. In the table below we'll combine those
1216 * reserved ranges with either the preceding or following range to keep the
1217 * table small and lookups fast.
1218 */
cf82d9dd 1219static const struct intel_forcewake_range __gen12_fw_ranges[] = {
92f5df0d
MR
1220 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1221 0x0 - 0xaff: reserved
1222 0xb00 - 0x1fff: always on */
cf82d9dd 1223 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
92f5df0d
MR
1224 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1225 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1226 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
cf82d9dd 1227 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
92f5df0d
MR
1228 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1229 0x4000 - 0x48ff: gt
1230 0x4900 - 0x51ff: reserved */
1231 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1232 0x5200 - 0x53ff: render
1233 0x5400 - 0x54ff: reserved
1234 0x5500 - 0x7fff: render */
55e3c170 1235 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
cf82d9dd 1236 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
92f5df0d
MR
1237 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1238 0x8160 - 0x817f: reserved
1239 0x8180 - 0x81ff: always on */
1240 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
cf82d9dd 1241 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
92f5df0d
MR
1242 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1243 0x8500 - 0x87ff: gt
1244 0x8800 - 0x8fff: reserved
1245 0x9000 - 0x947f: gt
1246 0x9480 - 0x94cf: reserved */
1247 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1248 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1249 0x9560 - 0x95ff: always on
1250 0x9600 - 0x97ff: reserved */
55e3c170 1251 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
92f5df0d
MR
1252 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1253 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1254 0xb400 - 0xbf7f: gt
1255 0xb480 - 0xbfff: reserved
1256 0xc000 - 0xcfff: gt */
1257 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1258 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1259 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1260 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1261 0xdc00 - 0xddff: render
1262 0xde00 - 0xde7f: reserved
1263 0xde80 - 0xe8ff: render
1264 0xe900 - 0xefff: reserved */
1265 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1266 0xf000 - 0xffff: gt
1267 0x10000 - 0x147ff: reserved */
1268 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1269 0x14800 - 0x14fff: render
1270 0x15000 - 0x16dff: reserved
1271 0x16e00 - 0x1bfff: render
1272 0x1c000 - 0x1ffff: reserved */
1273 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1274 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1275 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1276 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1277 0x24000 - 0x2407f: always on
1278 0x24080 - 0x2417f: reserved */
1279 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1280 0x24180 - 0x241ff: gt
1281 0x24200 - 0x249ff: reserved */
1282 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1283 0x24a00 - 0x24a7f: render
1284 0x24a80 - 0x251ff: reserved */
1285 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1286 0x25200 - 0x252ff: gt
1287 0x25300 - 0x255ff: reserved */
1288 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1289 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1290 0x25680 - 0x256ff: VD2
1291 0x25700 - 0x259ff: reserved */
1292 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1293 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1294 0x25a80 - 0x25aff: VD2
1295 0x25b00 - 0x2ffff: reserved */
1296 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
cf82d9dd 1297 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
92f5df0d
MR
1298 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1299 0x1c0000 - 0x1c2bff: VD0
1300 0x1c2c00 - 0x1c2cff: reserved
1301 0x1c2d00 - 0x1c2dff: VD0
1302 0x1c2e00 - 0x1c3eff: reserved
1303 0x1c3f00 - 0x1c3fff: VD0 */
1304 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1305 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1306 0x1c8000 - 0x1ca0ff: VE0
1307 0x1ca100 - 0x1cbeff: reserved
1308 0x1cbf00 - 0x1cbfff: VE0 */
1309 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1310 0x1cc000 - 0x1ccfff: VD0
1311 0x1cd000 - 0x1cffff: reserved */
1312 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1313 0x1d0000 - 0x1d2bff: VD2
1314 0x1d2c00 - 0x1d2cff: reserved
1315 0x1d2d00 - 0x1d2dff: VD2
1316 0x1d2e00 - 0x1d3eff: reserved
1317 0x1d3f00 - 0x1d3fff: VD2 */
cf82d9dd
MT
1318};
1319
bfac1e2b
MR
1320/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1321static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1322 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1323 0x0 - 0xaff: reserved
1324 0xb00 - 0x1fff: always on */
1325 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1326 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1327 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1328 0x4b00 - 0x4fff: reserved
1329 0x5000 - 0x51ff: always on */
1330 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1331 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1332 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1333 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1334 0x8160 - 0x817f: reserved
1335 0x8180 - 0x81ff: always on */
1336 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1337 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1338 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1339 0x8500 - 0x87ff: gt
1340 0x8800 - 0x8fff: reserved
1341 0x9000 - 0x947f: gt
1342 0x9480 - 0x94cf: reserved */
1343 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1344 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1345 0x9560 - 0x95ff: always on
1346 0x9600 - 0x97ff: reserved */
1347 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1348 0x9800 - 0xb4ff: gt
1349 0xb500 - 0xbfff: reserved
1350 0xc000 - 0xcfff: gt */
1351 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1352 GEN_FW_RANGE(0xd800, 0xdbff, FORCEWAKE_GT),
1353 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1354 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1355 0xdd00 - 0xddff: gt
1356 0xde00 - 0xde7f: reserved */
1357 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1358 0xde80 - 0xdfff: render
1359 0xe000 - 0xe0ff: reserved
1360 0xe100 - 0xe8ff: render */
1361 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1362 0xe900 - 0xe9ff: gt
1363 0xea00 - 0xefff: reserved
1364 0xf000 - 0xffff: gt */
1365 GEN_FW_RANGE(0x10000, 0x13fff, 0), /*
1366 0x10000 - 0x11fff: reserved
1367 0x12000 - 0x127ff: always on
1368 0x12800 - 0x13fff: reserved */
1369 GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0),
1370 GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2),
1371 GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4),
1372 GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6),
1373 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1374 0x14800 - 0x14fff: render
1375 0x15000 - 0x16dff: reserved
1376 0x16e00 - 0x1ffff: render */
1377 GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /*
1378 0x20000 - 0x20fff: VD0
1379 0x21000 - 0x21fff: reserved */
1380 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1381 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1382 0x24000 - 0x2407f: always on
1383 0x24080 - 0x2417f: reserved */
1384 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1385 0x24180 - 0x241ff: gt
1386 0x24200 - 0x249ff: reserved */
1387 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1388 0x24a00 - 0x24a7f: render
1389 0x24a80 - 0x251ff: reserved */
1390 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1391 0x25200 - 0x252ff: gt
1392 0x25300 - 0x25fff: reserved */
1393 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1394 0x26000 - 0x27fff: render
1395 0x28000 - 0x29fff: reserved
1396 0x2a000 - 0x2ffff: undocumented */
1397 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1398 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1399 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1400 0x1c0000 - 0x1c2bff: VD0
1401 0x1c2c00 - 0x1c2cff: reserved
1402 0x1c2d00 - 0x1c2dff: VD0
1403 0x1c2e00 - 0x1c3eff: reserved
1404 0x1c3f00 - 0x1c3fff: VD0 */
1405 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1406 0x1c4000 - 0x1c6bff: VD1
1407 0x1c6c00 - 0x1c6cff: reserved
1408 0x1c6d00 - 0x1c6dff: VD1
1409 0x1c6e00 - 0x1c7fff: reserved */
1410 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1411 0x1c8000 - 0x1ca0ff: VE0
1412 0x1ca100 - 0x1cbfff: reserved */
1413 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1414 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1415 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1416 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1417 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1418 0x1d0000 - 0x1d2bff: VD2
1419 0x1d2c00 - 0x1d2cff: reserved
1420 0x1d2d00 - 0x1d2dff: VD2
1421 0x1d2e00 - 0x1d3eff: reserved
1422 0x1d3f00 - 0x1d3fff: VD2 */
1423 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1424 0x1d4000 - 0x1d6bff: VD3
1425 0x1d6c00 - 0x1d6cff: reserved
1426 0x1d6d00 - 0x1d6dff: VD3
1427 0x1d6e00 - 0x1d7fff: reserved */
1428 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1429 0x1d8000 - 0x1da0ff: VE1
1430 0x1da100 - 0x1dffff: reserved */
1431 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1432 0x1e0000 - 0x1e2bff: VD4
1433 0x1e2c00 - 0x1e2cff: reserved
1434 0x1e2d00 - 0x1e2dff: VD4
1435 0x1e2e00 - 0x1e3eff: reserved
1436 0x1e3f00 - 0x1e3fff: VD4 */
1437 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1438 0x1e4000 - 0x1e6bff: VD5
1439 0x1e6c00 - 0x1e6cff: reserved
1440 0x1e6d00 - 0x1e6dff: VD5
1441 0x1e6e00 - 0x1e7fff: reserved */
1442 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1443 0x1e8000 - 0x1ea0ff: VE2
1444 0x1ea100 - 0x1effff: reserved */
1445 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1446 0x1f0000 - 0x1f2bff: VD6
1447 0x1f2c00 - 0x1f2cff: reserved
1448 0x1f2d00 - 0x1f2dff: VD6
1449 0x1f2e00 - 0x1f3eff: reserved
1450 0x1f3f00 - 0x1f3fff: VD6 */
1451 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1452 0x1f4000 - 0x1f6bff: VD7
1453 0x1f6c00 - 0x1f6cff: reserved
1454 0x1f6d00 - 0x1f6dff: VD7
1455 0x1f6e00 - 0x1f7fff: reserved */
1456 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1457};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore) && !before,
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before)
		spin_lock(&uncore->debug->lock);

	__unclaimed_reg_debug(uncore, reg, read, before);

	if (!before)
		spin_unlock(&uncore->debug->lock);
}
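/*
 * Illustrative note (editorial): the before/after pair above is meant to
 * bracket a raw access, exactly as the GEN6_READ_HEADER/FOOTER macros
 * below expand to, roughly:
 *
 *	unclaimed_reg_debug(uncore, reg, true, true);	// before the access
 *	val = __raw_uncore_read32(uncore, reg);
 *	unclaimed_reg_debug(uncore, reg, true, false);	// after: warns if unclaimed
 */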

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}
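/*
 * Illustrative note (editorial): the accessors generated below call
 * __force_wake_auto(), so their callers need no explicit forcewake, e.g.
 *
 *	val = intel_uncore_read(uncore, reg);
 *
 * transparently wakes the needed domain(s); the hrtimer armed by
 * fw_domain_arm_timer() releases them again shortly afterwards,
 * amortising the wake cost over a burst of MMIO accesses.
 */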

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)

__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(xehp_fwtable);
__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}
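/*
 * Illustrative note (editorial): registering the render domain on a
 * gen9+ part boils down to (cf. intel_uncore_fw_domains_init() below):
 *
 *	err = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 *			       FORCEWAKE_RENDER_GEN9,
 *			       FORCEWAKE_ACK_RENDER_GEN9);
 */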

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which, on systems where this notifier gets
		 * called, requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding an RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
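/*
 * Illustrative note (editorial): this notifier is hooked up in
 * uncore_forcewake_init() below:
 *
 *	uncore->pmic_bus_access_nb.notifier_call =
 *		i915_pmic_bus_access_notifier;
 *	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 *
 * and unregistered again in intel_uncore_fini_mmio().
 */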

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 * For dgfx chips the register range is expanded to 4MB.
	 */
	if (GRAPHICS_VER(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
	uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}
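/*
 * Illustrative note (editorial): during driver probe the expected call
 * order is roughly (error handling elided):
 *
 *	intel_uncore_init_early(uncore, i915);
 *	err = intel_uncore_init_mmio(uncore);
 *	...
 *	intel_uncore_fini_mmio(uncore);		// on teardown
 */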

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u8 min_graphics_ver;
	u8 max_graphics_ver;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.min_graphics_ver = 4,
	.max_graphics_ver = 12,
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
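/*
 * Illustrative userspace sketch (editorial; the 0x2358 offset is assumed
 * to be RING_TIMESTAMP(RENDER_RING_BASE)): reading the 64-bit render ring
 * timestamp through the whitelist above:
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("timestamp: %llu\n", (unsigned long long)rr.val);
 */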

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
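/*
 * Illustrative usage (editorial), assuming the caller already holds the
 * relevant forcewake and a runtime pm wakeref:
 *
 *	u32 last;
 *
 *	err = __intel_wait_for_register_fw(uncore, reg, mask, mask,
 *					   500, 0, &last);
 *	if (err)
 *		drm_err(&uncore->i915->drm,
 *			"wait timed out, last value 0x%08x\n", last);
 */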

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
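/*
 * Illustrative usage (editorial): unlike the _fw variant, this helper
 * acquires forcewake itself and may sleep, so it suits longer waits,
 * e.g. polling a status register until a busy bit clears:
 *
 *	err = __intel_wait_for_register(uncore, reg, busy_mask, 0,
 *					500, 50, NULL);
 */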

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}
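/*
 * Illustrative usage (editorial; the MCR steering helpers below follow
 * this exact pattern): resolve the domains once, then bracket a batch of
 * raw accesses with a single locked get/put:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	... raw intel_uncore_read_fw()/intel_uncore_write_fw() accesses ...
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */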

u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
					   i915_reg_t reg,
					   int slice, int subslice)
{
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;

	lockdep_assert_held(&uncore->lock);

	if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	/* Steer the multicast read to the requested slice/subslice. */
	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	/* Restore the previous steering once the read is done. */
	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	return val;
}

u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
					i915_reg_t reg, int slice, int subslice)
{
	enum forcewake_domains fw_domains;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif