Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_uncore.c
CommitLineData
907b28c5
CW
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
9ebb80e8 24#include <drm/drm_managed.h>
696173b0 25#include <linux/pm_runtime.h>
696173b0 26
202b1f4c 27#include "gt/intel_engine_regs.h"
0d6419e9 28#include "gt/intel_gt_regs.h"
bfac1e2b 29
907b28c5 30#include "i915_drv.h"
f0e204e0 31#include "i915_iosf_mbi.h"
801543b2 32#include "i915_reg.h"
a09d9a80 33#include "i915_trace.h"
cf9d2890 34#include "i915_vgpu.h"
6daccb0b 35
83e33372 36#define FORCEWAKE_ACK_TIMEOUT_MS 50
6b07b6d2 37#define GT_FIFO_TIMEOUT_MS 10
907b28c5 38
6cc5ca76 39#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
6af5d92f 40
5716c8c6
DA
41static void
42fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
43{
44 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
45}
46
0a9b2630 47void
639e30ee 48intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
0a9b2630 49{
639e30ee
MR
50 spin_lock_init(&i915->mmio_debug.lock);
51 i915->mmio_debug.unclaimed_mmio_check = 1;
52
53 i915->uncore.debug = &i915->mmio_debug;
0a9b2630
DCS
54}
55
/*
 * mmio_debug_suspend - temporarily disable unclaimed-MMIO checking.
 *
 * Nestable via debug->suspend_count: only the first suspender saves the
 * current unclaimed_mmio_check setting and forces it off, so userspace
 * forcewake-bypass register access does not trip the checker.
 * Caller context: takes debug->lock internally; do not hold it.
 */
f16bfc1d 56static void mmio_debug_suspend(struct intel_uncore *uncore)
0a9b2630 57{
639e30ee
MR
 58 if (!uncore->debug)
 59 return;
 60
f16bfc1d 61 spin_lock(&uncore->debug->lock);
0a9b2630
DCS
 62
 63 /* Save and disable mmio debugging for the user bypass */
f16bfc1d
MR
 64 if (!uncore->debug->suspend_count++) {
 65 uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
 66 uncore->debug->unclaimed_mmio_check = 0;
0a9b2630 67 }
f16bfc1d
MR
 68
 69 spin_unlock(&uncore->debug->lock);
0a9b2630
DCS
 70}
 71
f16bfc1d
MR
 72static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
 73
/*
 * mmio_debug_resume - re-enable unclaimed-MMIO checking (pairs with
 * mmio_debug_suspend). The last resumer restores the saved setting and
 * immediately scans for unclaimed accesses that happened during the
 * bypass, logging (but not attributing) any it finds.
 */
 74static void mmio_debug_resume(struct intel_uncore *uncore)
0a9b2630 75{
639e30ee
MR
 76 if (!uncore->debug)
 77 return;
 78
f16bfc1d
MR
 79 spin_lock(&uncore->debug->lock);
 80
 81 if (!--uncore->debug->suspend_count)
 82 uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
0a9b2630 83
f16bfc1d
MR
 84 if (check_for_unclaimed_mmio(uncore))
 85 drm_info(&uncore->i915->drm,
 86 "Invalid mmio detected during user access\n");
 87
 88 spin_unlock(&uncore->debug->lock);
0a9b2630
DCS
 89}
90
05a2fb15
MK
/*
 * Human-readable names for each forcewake domain, indexed by
 * enum forcewake_domain_id. Must stay in sync with FW_DOMAIN_ID_COUNT
 * (enforced by the BUILD_BUG_ON below).
 */
 91static const char * const forcewake_domain_names[] = {
 92 "render",
bc33e71f 93 "gt",
05a2fb15 94 "media",
a89a70a8
DCS
 95 "vdbox0",
 96 "vdbox1",
 97 "vdbox2",
 98 "vdbox3",
bfac1e2b
MR
 99 "vdbox4",
 100 "vdbox5",
 101 "vdbox6",
 102 "vdbox7",
a89a70a8
DCS
 103 "vebox0",
 104 "vebox1",
bfac1e2b
MR
 105 "vebox2",
 106 "vebox3",
14f2f9bf 107 "gsc",
05a2fb15
MK
 108};
 109
/*
 * intel_uncore_forcewake_domain_to_str - map a domain id to its name.
 * Returns "unknown" (and fires a WARN) for out-of-range ids.
 */
 110const char *
48c1026a 111intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 112{
53abb679 113 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
05a2fb15
MK
 114
 115 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
 116 return forcewake_domain_names[id];
 117
 118 WARN_ON(id);
 119
 120 return "unknown";
 121}
122
/*
 * Low-level accessors for a forcewake domain's registers. FORCEWAKE
 * registers are "masked" registers: the upper 16 bits select which of
 * the lower 16 bits the write affects, hence the _MASKED_BIT_* helpers.
 */
535d8d27 123#define fw_ack(d) readl((d)->reg_ack)
159367bb
DCS
 124#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
 125#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
535d8d27 126
/*
 * fw_domain_reset - drop every wake-request bit of a domain during init
 * or reset, without waiting for an ack (the power well may not exist).
 */
05a2fb15 127static inline void
159367bb 128fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
907b28c5 129{
26376a7e
OM
 130 /*
 131 * We don't really know if the powerwell for the forcewake domain we are
 132 * trying to reset here does exist at this point (engines could be fused
 133 * off in ICL+), so no waiting for acks
 134 */
6509dd11
RS
 135 /* WaRsClearFWBitsAtReset */
/* NOTE(review): gen12+ masks out bit 12 (0xefff vs 0xffff) — presumably
 * reserved on those platforms; confirm against the workaround database. */
 136 if (GRAPHICS_VER(d->uncore->i915) >= 12)
 137 fw_clear(d, 0xefff);
 138 else
 139 fw_clear(d, 0xffff);
907b28c5
CW
 140}
141
05a2fb15
MK
142static inline void
143fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
907b28c5 144{
77adbd8f
CW
145 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
146 d->uncore->fw_domains_timer |= d->mask;
a57a4a67
TU
147 d->wake_count++;
148 hrtimer_start_range_ns(&d->timer,
8b0e1953 149 NSEC_PER_MSEC,
a57a4a67
TU
150 NSEC_PER_MSEC,
151 HRTIMER_MODE_REL);
907b28c5
CW
152}
153
71306303 154static inline int
535d8d27 155__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
71306303
MK
156 const u32 ack,
157 const u32 value)
158{
535d8d27 159 return wait_for_atomic((fw_ack(d) & ack) == value,
71306303
MK
160 FORCEWAKE_ACK_TIMEOUT_MS);
161}
162
163static inline int
535d8d27 164wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
71306303
MK
165 const u32 ack)
166{
535d8d27 167 return __wait_for_ack(d, ack, 0);
71306303
MK
168}
169
170static inline int
535d8d27 171wait_ack_set(const struct intel_uncore_forcewake_domain *d,
71306303
MK
172 const u32 ack)
173{
535d8d27 174 return __wait_for_ack(d, ack, ack);
71306303
MK
175}
176
05a2fb15 177static inline void
535d8d27 178fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
907b28c5 179{
fdd9b7dc
MR
180 if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
181 return;
182
183 if (fw_ack(d) == ~0)
184 drm_err(&d->uncore->i915->drm,
185 "%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
186 intel_uncore_forcewake_domain_to_str(d->id));
187 else
a10234fd
TU
188 drm_err(&d->uncore->i915->drm,
189 "%s: timed out waiting for forcewake ack to clear.\n",
190 intel_uncore_forcewake_domain_to_str(d->id));
fdd9b7dc
MR
191
192 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
05a2fb15 193}
907b28c5 194
71306303
MK
195enum ack_type {
196 ACK_CLEAR = 0,
197 ACK_SET
198};
199
200static int
535d8d27 201fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
71306303
MK
202 const enum ack_type type)
203{
204 const u32 ack_bit = FORCEWAKE_KERNEL;
205 const u32 value = type == ACK_SET ? ack_bit : 0;
206 unsigned int pass;
207 bool ack_detected;
208
209 /*
210 * There is a possibility of driver's wake request colliding
211 * with hardware's own wake requests and that can cause
212 * hardware to not deliver the driver's ack message.
213 *
214 * Use a fallback bit toggle to kick the gpu state machine
215 * in the hope that the original ack will be delivered along with
216 * the fallback ack.
217 *
cc38cae7
OM
218 * This workaround is described in HSDES #1604254524 and it's known as:
219 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
220 * although the name is a bit misleading.
71306303
MK
221 */
222
223 pass = 1;
224 do {
535d8d27 225 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
71306303 226
159367bb 227 fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
71306303
MK
228 /* Give gt some time to relax before the polling frenzy */
229 udelay(10 * pass);
535d8d27 230 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
71306303 231
535d8d27 232 ack_detected = (fw_ack(d) & ack_bit) == value;
71306303 233
159367bb 234 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
71306303
MK
235 } while (!ack_detected && pass++ < 10);
236
a10234fd
TU
237 drm_dbg(&d->uncore->i915->drm,
238 "%s had to use fallback to %s ack, 0x%x (passes %u)\n",
239 intel_uncore_forcewake_domain_to_str(d->id),
240 type == ACK_SET ? "set" : "clear",
241 fw_ack(d),
242 pass);
71306303
MK
243
244 return ack_detected ? 0 : -ETIMEDOUT;
245}
246
247static inline void
535d8d27 248fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
71306303 249{
535d8d27 250 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
71306303
MK
251 return;
252
535d8d27
DCS
253 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
254 fw_domain_wait_ack_clear(d);
71306303
MK
255}
256
05a2fb15 257static inline void
159367bb 258fw_domain_get(const struct intel_uncore_forcewake_domain *d)
05a2fb15 259{
159367bb 260 fw_set(d, FORCEWAKE_KERNEL);
05a2fb15 261}
907b28c5 262
05a2fb15 263static inline void
535d8d27 264fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
05a2fb15 265{
18ecc6c5 266 if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
a10234fd
TU
267 drm_err(&d->uncore->i915->drm,
268 "%s: timed out waiting for forcewake ack request.\n",
269 intel_uncore_forcewake_domain_to_str(d->id));
65706203 270 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
18ecc6c5 271 }
05a2fb15 272}
907b28c5 273
71306303 274static inline void
535d8d27 275fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
71306303 276{
535d8d27 277 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
71306303
MK
278 return;
279
535d8d27
DCS
280 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
281 fw_domain_wait_ack_set(d);
71306303
MK
282}
283
05a2fb15 284static inline void
159367bb 285fw_domain_put(const struct intel_uncore_forcewake_domain *d)
05a2fb15 286{
159367bb 287 fw_clear(d, FORCEWAKE_KERNEL);
907b28c5
CW
288}
289
/*
 * fw_domains_get_normal - wake a set of forcewake domains.
 *
 * Two-pass protocol: first request the wake on every domain (after its
 * previous ack has cleared), then wait for all acks to assert. Issuing
 * all requests before polling lets the power wells ramp in parallel.
 * Caller holds uncore->lock.
 */
05a2fb15 290static void
5716c8c6 291fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
907b28c5 292{
05a2fb15 293 struct intel_uncore_forcewake_domain *d;
d2dc94bc 294 unsigned int tmp;
907b28c5 295
535d8d27 296 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
d2dc94bc 297
f568eeee 298 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
535d8d27 299 fw_domain_wait_ack_clear(d);
159367bb 300 fw_domain_get(d);
05a2fb15 301 }
4e1176dd 302
f568eeee 303 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
535d8d27 304 fw_domain_wait_ack_set(d);
71306303 305
535d8d27 306 uncore->fw_domains_active |= fw_domains;
71306303
MK
 307}
 308
/*
 * fw_domains_get_with_fallback - as fw_domains_get_normal, but uses the
 * FORCEWAKE_KERNEL_FALLBACK toggle when an ack goes missing (collision
 * between driver and hardware wake requests). Caller holds uncore->lock.
 */
 309static void
f568eeee 310fw_domains_get_with_fallback(struct intel_uncore *uncore,
71306303
MK
 311 enum forcewake_domains fw_domains)
 312{
 313 struct intel_uncore_forcewake_domain *d;
 314 unsigned int tmp;
 315
535d8d27 316 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
71306303 317
f568eeee 318 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
535d8d27 319 fw_domain_wait_ack_clear_fallback(d);
159367bb 320 fw_domain_get(d);
71306303
MK
 321 }
 322
f568eeee 323 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
535d8d27 324 fw_domain_wait_ack_set_fallback(d);
b8473050 325
535d8d27 326 uncore->fw_domains_active |= fw_domains;
05a2fb15 327}
907b28c5 328
05a2fb15 329static void
f568eeee 330fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
05a2fb15
MK
331{
332 struct intel_uncore_forcewake_domain *d;
d2dc94bc
CW
333 unsigned int tmp;
334
535d8d27 335 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
907b28c5 336
f568eeee 337 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
159367bb 338 fw_domain_put(d);
b8473050 339
535d8d27 340 uncore->fw_domains_active &= ~fw_domains;
05a2fb15 341}
907b28c5 342
05a2fb15 343static void
f568eeee 344fw_domains_reset(struct intel_uncore *uncore,
577ac4bd 345 enum forcewake_domains fw_domains)
05a2fb15
MK
346{
347 struct intel_uncore_forcewake_domain *d;
d2dc94bc 348 unsigned int tmp;
05a2fb15 349
d2dc94bc 350 if (!fw_domains)
3225b2f9 351 return;
f9b3927a 352
535d8d27 353 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
d2dc94bc 354
f568eeee 355 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
159367bb 356 fw_domain_reset(d);
05a2fb15
MK
357}
358
6ebc9692 359static inline u32 gt_thread_status(struct intel_uncore *uncore)
a5b22b5e
CW
360{
361 u32 val;
362
6cc5ca76 363 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
a5b22b5e
CW
364 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
365
366 return val;
367}
368
6ebc9692 369static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
05a2fb15 370{
a5b22b5e
CW
371 /*
372 * w/a for a sporadic read returning 0 by waiting for the GT
05a2fb15
MK
373 * thread to wake up.
374 */
a9f236d1
PB
375 drm_WARN_ONCE(&uncore->i915->drm,
376 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
377 "GT thread status wait timed out\n");
05a2fb15
MK
378}
379
f568eeee 380static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
48c1026a 381 enum forcewake_domains fw_domains)
05a2fb15 382{
5def925d 383 fw_domains_get_normal(uncore, fw_domains);
907b28c5 384
05a2fb15 385 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
6ebc9692 386 __gen6_gt_wait_for_thread_c0(uncore);
907b28c5
CW
387}
388
6ebc9692 389static inline u32 fifo_free_entries(struct intel_uncore *uncore)
c32e3788 390{
6cc5ca76 391 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
c32e3788
DG
392
393 return count & GT_FIFO_FREE_ENTRIES_MASK;
394}
395
/*
 * __gen6_gt_wait_for_fifo - reserve one slot in the GT write FIFO.
 *
 * Blocks (bounded by GT_FIFO_TIMEOUT_MS) until the number of free FIFO
 * entries exceeds the reserved watermark, then accounts for the write
 * this caller is about to issue by decrementing the cached count.
 * On timeout it logs and returns, letting the write proceed anyway.
 */
6ebc9692 396static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
907b28c5 397{
6b07b6d2 398 u32 n;
907b28c5 399
5135d64b
D
 400 /* On VLV, FIFO will be shared by both SW and HW.
 401 * So, we need to read the FREE_ENTRIES everytime */
01385758 402 if (IS_VALLEYVIEW(uncore->i915))
6ebc9692 403 n = fifo_free_entries(uncore);
6b07b6d2 404 else
272c7e52 405 n = uncore->fifo_count;
6b07b6d2
MK
 406
 407 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
6ebc9692 408 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
6b07b6d2
MK
 409 GT_FIFO_NUM_RESERVED_ENTRIES,
 410 GT_FIFO_TIMEOUT_MS)) {
d0208cfa
WK
 411 drm_dbg(&uncore->i915->drm,
 412 "GT_FIFO timeout, entries: %u\n", n);
 413 return;
907b28c5 414 }
907b28c5 415 }
907b28c5 416
/* Account for the slot the caller's upcoming write will consume. */
272c7e52 417 uncore->fifo_count = n - 1;
907b28c5
CW
 418}
419
a57a4a67
TU
420static enum hrtimer_restart
421intel_uncore_fw_release_timer(struct hrtimer *timer)
38cff0b1 422{
a57a4a67
TU
423 struct intel_uncore_forcewake_domain *domain =
424 container_of(timer, struct intel_uncore_forcewake_domain, timer);
f833cdb0 425 struct intel_uncore *uncore = domain->uncore;
b2cff0db 426 unsigned long irqflags;
38cff0b1 427
eb17af67 428 assert_rpm_device_not_suspended(uncore->rpm);
38cff0b1 429
c9e0c6da
CW
430 if (xchg(&domain->active, false))
431 return HRTIMER_RESTART;
432
f568eeee 433 spin_lock_irqsave(&uncore->lock, irqflags);
b2cff0db 434
77adbd8f
CW
435 uncore->fw_domains_timer &= ~domain->mask;
436
437 GEM_BUG_ON(!domain->wake_count);
b8473050 438 if (--domain->wake_count == 0)
5716c8c6 439 fw_domains_put(uncore, domain->mask);
b2cff0db 440
f568eeee 441 spin_unlock_irqrestore(&uncore->lock, irqflags);
a57a4a67
TU
442
443 return HRTIMER_NORESTART;
38cff0b1
ZW
444}
445
a5266db4 446/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
/*
 * intel_uncore_forcewake_reset - quiesce and reset all forcewake state.
 *
 * Cancels every domain's auto-release timer (retrying until none are
 * active), drops any remaining kernel wakerefs, and clears the hardware
 * wake-request bits. Runs the whole tail under uncore->lock so no
 * register access can race with the inconsistent intermediate state.
 *
 * Returns: the mask of domains that were active (e.g. user-held), so the
 * caller can restore them after suspend/resume.
 */
d60996ab 447static unsigned int
f568eeee 448intel_uncore_forcewake_reset(struct intel_uncore *uncore)
38cff0b1 449{
48c1026a 450 unsigned long irqflags;
b2cff0db 451 struct intel_uncore_forcewake_domain *domain;
48c1026a 452 int retry_count = 100;
003342a5 453 enum forcewake_domains fw, active_domains;
38cff0b1 454
a5266db4
HG
 455 iosf_mbi_assert_punit_acquired();
 456
b2cff0db
CW
 457 /* Hold uncore.lock across reset to prevent any register access
 458 * with forcewake not set correctly. Wait until all pending
 459 * timers are run before holding.
 460 */
 461 while (1) {
d2dc94bc
CW
 462 unsigned int tmp;
 463
b2cff0db 464 active_domains = 0;
38cff0b1 465
/* First pass (unlocked): cancel timers; run any we raced with. */
f568eeee 466 for_each_fw_domain(domain, uncore, tmp) {
c9e0c6da 467 smp_store_mb(domain->active, false);
a57a4a67 468 if (hrtimer_cancel(&domain->timer) == 0)
b2cff0db 469 continue;
38cff0b1 470
a57a4a67 471 intel_uncore_fw_release_timer(&domain->timer);
b2cff0db 472 }
aec347ab 473
f568eeee 474 spin_lock_irqsave(&uncore->lock, irqflags);
b2ec142c 475
/* Second pass (locked): check nothing re-armed behind our back. */
f568eeee 476 for_each_fw_domain(domain, uncore, tmp) {
a57a4a67 477 if (hrtimer_active(&domain->timer))
33c582c1 478 active_domains |= domain->mask;
b2cff0db 479 }
3123fcaf 480
b2cff0db
CW
 481 if (active_domains == 0)
 482 break;
aec347ab 483
b2cff0db 484 if (--retry_count == 0) {
d0208cfa 485 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
b2cff0db
CW
 486 break;
 487 }
0294ae7b 488
f568eeee 489 spin_unlock_irqrestore(&uncore->lock, irqflags);
b2cff0db
CW
 490 cond_resched();
 491 }
0294ae7b 492
a9f236d1 493 drm_WARN_ON(&uncore->i915->drm, active_domains);
b2cff0db 494
f568eeee 495 fw = uncore->fw_domains_active;
b2cff0db 496 if (fw)
5716c8c6 497 fw_domains_put(uncore, fw);
ef46e0d2 498
f568eeee
DCS
 499 fw_domains_reset(uncore, uncore->fw_domains);
 500 assert_forcewakes_inactive(uncore);
b2cff0db 501
f568eeee 502 spin_unlock_irqrestore(&uncore->lock, irqflags);
d60996ab
CW
 503
 504 return fw; /* track the lost user forcewake domains */
ef46e0d2
DV
 505}
506
8a47eb19 507static bool
6ebc9692 508fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
8a47eb19
MK
509{
510 u32 dbg;
511
6cc5ca76 512 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
8a47eb19
MK
513 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
514 return false;
515
29b6f88d
MR
516 /*
517 * Bugs in PCI programming (or failing hardware) can occasionally cause
518 * us to lose access to the MMIO BAR. When this happens, register
519 * reads will come back with 0xFFFFFFFF for every register and things
520 * go bad very quickly. Let's try to detect that special case and at
521 * least try to print a more informative message about what has
522 * happened.
523 *
524 * During normal operation the FPGA_DBG register has several unused
525 * bits that will always read back as 0's so we can use them as canaries
526 * to recognize when MMIO accesses are just busted.
527 */
528 if (unlikely(dbg == ~0))
529 drm_err(&uncore->i915->drm,
530 "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
531
6cc5ca76 532 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
8a47eb19
MK
533
534 return true;
535}
536
8ac3e1bb 537static bool
6ebc9692 538vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
8ac3e1bb
MK
539{
540 u32 cer;
541
6cc5ca76 542 cer = __raw_uncore_read32(uncore, CLAIM_ER);
8ac3e1bb
MK
543 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
544 return false;
545
6cc5ca76 546 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
8ac3e1bb
MK
547
548 return true;
549}
550
a338908c 551static bool
6ebc9692 552gen6_check_for_fifo_debug(struct intel_uncore *uncore)
a338908c
MK
553{
554 u32 fifodbg;
555
6cc5ca76 556 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
a338908c
MK
557
558 if (unlikely(fifodbg)) {
d0208cfa 559 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
6cc5ca76 560 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
a338908c
MK
561 }
562
563 return fifodbg;
564}
565
8ac3e1bb 566static bool
2cf7bf6f 567check_for_unclaimed_mmio(struct intel_uncore *uncore)
8ac3e1bb 568{
a338908c
MK
569 bool ret = false;
570
0a9b2630
DCS
571 lockdep_assert_held(&uncore->debug->lock);
572
573 if (uncore->debug->suspend_count)
574 return false;
575
2cf7bf6f 576 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
6ebc9692 577 ret |= fpga_check_for_unclaimed_mmio(uncore);
8ac3e1bb 578
2cf7bf6f 579 if (intel_uncore_has_dbg_unclaimed(uncore))
6ebc9692 580 ret |= vlv_check_for_unclaimed_mmio(uncore);
a338908c 581
2cf7bf6f 582 if (intel_uncore_has_fifo(uncore))
6ebc9692 583 ret |= gen6_check_for_fifo_debug(uncore);
8ac3e1bb 584
a338908c 585 return ret;
8ac3e1bb
MK
586}
587
2e81bc61
DCS
588static void forcewake_early_sanitize(struct intel_uncore *uncore,
589 unsigned int restore_forcewake)
f9b3927a 590{
2e81bc61 591 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
907b28c5 592
a04f90a3 593 /* WaDisableShadowRegForCpd:chv */
01385758 594 if (IS_CHERRYVIEW(uncore->i915)) {
6cc5ca76
DCS
595 __raw_uncore_write32(uncore, GTFIFOCTL,
596 __raw_uncore_read32(uncore, GTFIFOCTL) |
597 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
598 GT_FIFO_CTL_RC6_POLICY_STALL);
a04f90a3
D
599 }
600
a5266db4 601 iosf_mbi_punit_acquire();
f7de5027 602 intel_uncore_forcewake_reset(uncore);
d60996ab 603 if (restore_forcewake) {
f7de5027 604 spin_lock_irq(&uncore->lock);
5716c8c6 605 fw_domains_get(uncore, restore_forcewake);
f7de5027 606
2cf7bf6f 607 if (intel_uncore_has_fifo(uncore))
6ebc9692 608 uncore->fifo_count = fifo_free_entries(uncore);
f7de5027 609 spin_unlock_irq(&uncore->lock);
d60996ab 610 }
a5266db4 611 iosf_mbi_punit_release();
521198a2
MK
612}
613
f7de5027 614void intel_uncore_suspend(struct intel_uncore *uncore)
ed493883 615{
2e81bc61
DCS
616 if (!intel_uncore_has_forcewake(uncore))
617 return;
618
a5266db4
HG
619 iosf_mbi_punit_acquire();
620 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
f7de5027
DCS
621 &uncore->pmic_bus_access_nb);
622 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
a5266db4 623 iosf_mbi_punit_release();
68f60946
HG
624}
625
f7de5027 626void intel_uncore_resume_early(struct intel_uncore *uncore)
68f60946 627{
d60996ab
CW
628 unsigned int restore_forcewake;
629
2e81bc61 630 if (intel_uncore_unclaimed_mmio(uncore))
d0208cfa 631 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
2e81bc61
DCS
632
633 if (!intel_uncore_has_forcewake(uncore))
634 return;
635
f7de5027 636 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
2e81bc61 637 forcewake_early_sanitize(uncore, restore_forcewake);
d60996ab 638
f7de5027 639 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
ed493883
ID
640}
641
f7de5027 642void intel_uncore_runtime_resume(struct intel_uncore *uncore)
bedf4d79 643{
2e81bc61
DCS
644 if (!intel_uncore_has_forcewake(uncore))
645 return;
646
f7de5027 647 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
bedf4d79
HG
648}
649
f568eeee 650static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
a6111f7b
CW
651 enum forcewake_domains fw_domains)
652{
653 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 654 unsigned int tmp;
a6111f7b 655
f568eeee 656 fw_domains &= uncore->fw_domains;
a6111f7b 657
f568eeee 658 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
c9e0c6da 659 if (domain->wake_count++) {
33c582c1 660 fw_domains &= ~domain->mask;
c9e0c6da
CW
661 domain->active = true;
662 }
663 }
a6111f7b 664
b8473050 665 if (fw_domains)
5716c8c6 666 fw_domains_get(uncore, fw_domains);
a6111f7b
CW
667}
668
59bad947
MK
669/**
670 * intel_uncore_forcewake_get - grab forcewake domain references
3ceea6a1 671 * @uncore: the intel_uncore structure
59bad947
MK
672 * @fw_domains: forcewake domains to get reference on
673 *
674 * This function can be used get GT's forcewake domain references.
675 * Normal register access will handle the forcewake domains automatically.
676 * However if some sequence requires the GT to not power down a particular
677 * forcewake domains this function should be called at the beginning of the
678 * sequence. And subsequently the reference should be dropped by symmetric
679 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
680 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
907b28c5 681 */
3ceea6a1 682void intel_uncore_forcewake_get(struct intel_uncore *uncore,
48c1026a 683 enum forcewake_domains fw_domains)
907b28c5
CW
684{
685 unsigned long irqflags;
686
5716c8c6 687 if (!uncore->fw_get_funcs)
ab484f8f
BW
688 return;
689
87b391b9 690 assert_rpm_wakelock_held(uncore->rpm);
c8c8fb33 691
f568eeee
DCS
692 spin_lock_irqsave(&uncore->lock, irqflags);
693 __intel_uncore_forcewake_get(uncore, fw_domains);
694 spin_unlock_irqrestore(&uncore->lock, irqflags);
907b28c5
CW
695}
696
d7a133d8
CW
697/**
698 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
3ceea6a1 699 * @uncore: the intel_uncore structure
d7a133d8
CW
700 *
701 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
702 * the GT powerwell and in the process disable our debugging for the
703 * duration of userspace's bypass.
704 */
3ceea6a1 705void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
d7a133d8 706{
f568eeee 707 spin_lock_irq(&uncore->lock);
0a9b2630 708 if (!uncore->user_forcewake_count++) {
3ceea6a1 709 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
f16bfc1d 710 mmio_debug_suspend(uncore);
d7a133d8 711 }
f568eeee 712 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
713}
714
715/**
716 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
3ceea6a1 717 * @uncore: the intel_uncore structure
d7a133d8
CW
718 *
719 * This function complements intel_uncore_forcewake_user_get() and releases
720 * the GT powerwell taken on behalf of the userspace bypass.
721 */
3ceea6a1 722void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
d7a133d8 723{
f568eeee 724 spin_lock_irq(&uncore->lock);
0a9b2630 725 if (!--uncore->user_forcewake_count) {
f16bfc1d 726 mmio_debug_resume(uncore);
3ceea6a1 727 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
d7a133d8 728 }
f568eeee 729 spin_unlock_irq(&uncore->lock);
d7a133d8
CW
730}
731
59bad947 732/**
a6111f7b 733 * intel_uncore_forcewake_get__locked - grab forcewake domain references
3ceea6a1 734 * @uncore: the intel_uncore structure
a6111f7b 735 * @fw_domains: forcewake domains to get reference on
59bad947 736 *
a6111f7b
CW
737 * See intel_uncore_forcewake_get(). This variant places the onus
738 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
907b28c5 739 */
3ceea6a1 740void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
a6111f7b
CW
741 enum forcewake_domains fw_domains)
742{
f568eeee
DCS
743 lockdep_assert_held(&uncore->lock);
744
5716c8c6 745 if (!uncore->fw_get_funcs)
a6111f7b
CW
746 return;
747
f568eeee 748 __intel_uncore_forcewake_get(uncore, fw_domains);
a6111f7b
CW
749}
750
f568eeee 751static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
7938d615
TU
752 enum forcewake_domains fw_domains,
753 bool delayed)
907b28c5 754{
b2cff0db 755 struct intel_uncore_forcewake_domain *domain;
d2dc94bc 756 unsigned int tmp;
907b28c5 757
f568eeee 758 fw_domains &= uncore->fw_domains;
b2cff0db 759
f568eeee 760 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
77adbd8f 761 GEM_BUG_ON(!domain->wake_count);
b2cff0db 762
c9e0c6da
CW
763 if (--domain->wake_count) {
764 domain->active = true;
b2cff0db 765 continue;
c9e0c6da 766 }
b2cff0db 767
7938d615
TU
768 if (delayed &&
769 !(domain->uncore->fw_domains_timer & domain->mask))
770 fw_domain_arm_timer(domain);
771 else
772 fw_domains_put(uncore, domain->mask);
aec347ab 773 }
a6111f7b 774}
dc9fb09c 775
a6111f7b
CW
776/**
777 * intel_uncore_forcewake_put - release a forcewake domain reference
3ceea6a1 778 * @uncore: the intel_uncore structure
a6111f7b
CW
779 * @fw_domains: forcewake domains to put references
780 *
781 * This function drops the device-level forcewakes for specified
782 * domains obtained by intel_uncore_forcewake_get().
783 */
3ceea6a1 784void intel_uncore_forcewake_put(struct intel_uncore *uncore,
a6111f7b
CW
785 enum forcewake_domains fw_domains)
786{
787 unsigned long irqflags;
788
5716c8c6 789 if (!uncore->fw_get_funcs)
a6111f7b
CW
790 return;
791
f568eeee 792 spin_lock_irqsave(&uncore->lock, irqflags);
7938d615
TU
793 __intel_uncore_forcewake_put(uncore, fw_domains, false);
794 spin_unlock_irqrestore(&uncore->lock, irqflags);
795}
796
797void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
798 enum forcewake_domains fw_domains)
799{
800 unsigned long irqflags;
801
802 if (!uncore->fw_get_funcs)
803 return;
804
805 spin_lock_irqsave(&uncore->lock, irqflags);
806 __intel_uncore_forcewake_put(uncore, fw_domains, true);
f568eeee 807 spin_unlock_irqrestore(&uncore->lock, irqflags);
907b28c5
CW
808}
809
032d992d
CW
810/**
811 * intel_uncore_forcewake_flush - flush the delayed release
812 * @uncore: the intel_uncore structure
813 * @fw_domains: forcewake domains to flush
814 */
815void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
816 enum forcewake_domains fw_domains)
817{
818 struct intel_uncore_forcewake_domain *domain;
819 unsigned int tmp;
820
5716c8c6 821 if (!uncore->fw_get_funcs)
032d992d
CW
822 return;
823
824 fw_domains &= uncore->fw_domains;
825 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
826 WRITE_ONCE(domain->active, false);
827 if (hrtimer_cancel(&domain->timer))
828 intel_uncore_fw_release_timer(&domain->timer);
829 }
830}
831
a6111f7b 832/**
449a0ef5 833 * intel_uncore_forcewake_put__locked - release forcewake domain references
3ceea6a1 834 * @uncore: the intel_uncore structure
449a0ef5 835 * @fw_domains: forcewake domains to put references
a6111f7b
CW
836 *
837 * See intel_uncore_forcewake_put(). This variant places the onus
838 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
839 */
3ceea6a1 840void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
a6111f7b
CW
841 enum forcewake_domains fw_domains)
842{
f568eeee
DCS
843 lockdep_assert_held(&uncore->lock);
844
5716c8c6 845 if (!uncore->fw_get_funcs)
a6111f7b
CW
846 return;
847
7938d615 848 __intel_uncore_forcewake_put(uncore, fw_domains, false);
a6111f7b
CW
849}
850
f568eeee 851void assert_forcewakes_inactive(struct intel_uncore *uncore)
e998c40f 852{
5716c8c6 853 if (!uncore->fw_get_funcs)
e998c40f
PZ
854 return;
855
a9f236d1
PB
856 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
857 "Expected all fw_domains to be inactive, but %08x are still on\n",
858 uncore->fw_domains_active);
67e64564
CW
859}
860
f568eeee 861void assert_forcewakes_active(struct intel_uncore *uncore,
67e64564
CW
862 enum forcewake_domains fw_domains)
863{
b7dc9395
CW
864 struct intel_uncore_forcewake_domain *domain;
865 unsigned int tmp;
866
867 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
868 return;
869
5716c8c6 870 if (!uncore->fw_get_funcs)
67e64564
CW
871 return;
872
15e7facb
CW
873 spin_lock_irq(&uncore->lock);
874
87b391b9 875 assert_rpm_wakelock_held(uncore->rpm);
67e64564 876
f568eeee 877 fw_domains &= uncore->fw_domains;
a9f236d1
PB
878 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
879 "Expected %08x fw_domains to be active, but %08x are off\n",
880 fw_domains, fw_domains & ~uncore->fw_domains_active);
b7dc9395
CW
881
882 /*
883 * Check that the caller has an explicit wakeref and we don't mistake
884 * it for the auto wakeref.
885 */
b7dc9395 886 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
badf1f27 887 unsigned int actual = READ_ONCE(domain->wake_count);
b7dc9395
CW
888 unsigned int expect = 1;
889
77adbd8f 890 if (uncore->fw_domains_timer & domain->mask)
b7dc9395
CW
891 expect++; /* pending automatic release */
892
a9f236d1
PB
893 if (drm_WARN(&uncore->i915->drm, actual < expect,
894 "Expected domain %d to be held awake by caller, count=%d\n",
895 domain->id, actual))
b7dc9395
CW
896 break;
897 }
15e7facb
CW
898
899 spin_unlock_irq(&uncore->lock);
e998c40f
PZ
900}
901
14f2f9bf
MR
902/*
903 * We give fast paths for the really cool registers. The second range includes
904 * media domains (and the GSC starting from Xe_LPM+)
905 */
aef02736
MR
906#define NEEDS_FORCE_WAKE(reg) ({ \
907 u32 __reg = (reg); \
14f2f9bf 908 __reg < 0x40000 || __reg >= 0x116000; \
6863b76c
TU
909})
910
9480dbf0 911static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 912{
91e630b9
TU
913 if (offset < entry->start)
914 return -1;
915 else if (offset > entry->end)
916 return 1;
917 else
918 return 0;
919}
920
/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int lo__ = 0, hi__ = (num); \
	typeof(base) found__ = NULL; \
	while (lo__ < hi__) { \
		unsigned int mid__ = lo__ + (hi__ - lo__) / 2; \
		int dir__ = (cmp)((key), (base) + mid__); \
		if (dir__ == 0) { \
			found__ = (base) + mid__; \
			break; \
		} \
		if (dir__ < 0) \
			hi__ = mid__; \
		else \
			lo__ = mid__ + 1; \
	} \
	found__; \
})
939
9fc1117c 940static enum forcewake_domains
cb7ee690 941find_fw_domain(struct intel_uncore *uncore, u32 offset)
9fc1117c 942{
9480dbf0 943 const struct intel_forcewake_range *entry;
9fc1117c 944
eefac38a
MR
945 if (IS_GSI_REG(offset))
946 offset += uncore->gsi_offset;
947
9480dbf0 948 entry = BSEARCH(offset,
cb7ee690
DCS
949 uncore->fw_domains_table,
950 uncore->fw_domains_table_entries,
91e630b9 951 fw_range_cmp);
38fb6a40 952
99191427
JL
953 if (!entry)
954 return 0;
955
a89a70a8
DCS
956 /*
957 * The list of FW domains depends on the SKU in gen11+ so we
958 * can't determine it statically. We use FORCEWAKE_ALL and
959 * translate it here to the list of available domains.
960 */
961 if (entry->domains == FORCEWAKE_ALL)
cb7ee690 962 return uncore->fw_domains;
a89a70a8 963
a9f236d1
PB
964 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
965 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
966 entry->domains & ~uncore->fw_domains, offset);
99191427
JL
967
968 return entry->domains;
9fc1117c
TU
969}
970
d32e8ed9
MR
971/*
972 * Shadowed register tables describe special register ranges that i915 is
973 * allowed to write to without acquiring forcewake. If these registers' power
974 * wells are down, the hardware will save values written by i915 to a shadow
975 * copy and automatically transfer them into the real register the next time
976 * the power well is woken up. Shadowing only applies to writes; forcewake
977 * must still be acquired when reading from registers in these ranges.
978 *
979 * The documentation for shadowed registers is somewhat spotty on older
980 * platforms. However missing registers from these lists is non-fatal; it just
981 * means we'll wake up the hardware for some register accesses where we didn't
982 * really need to.
983 *
984 * The ranges listed in these tables must be sorted by offset.
985 *
986 * When adding new tables here, please also add them to
987 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
988 * scanned for obvious mistakes or typos by the selftests.
989 */
6863b76c 990
f9d56cd6
MR
991static const struct i915_range gen8_shadowed_regs[] = {
992 { .start = 0x2030, .end = 0x2030 },
993 { .start = 0xA008, .end = 0xA00C },
994 { .start = 0x12030, .end = 0x12030 },
995 { .start = 0x1a030, .end = 0x1a030 },
996 { .start = 0x22030, .end = 0x22030 },
6863b76c
TU
997};
998
f9d56cd6
MR
999static const struct i915_range gen11_shadowed_regs[] = {
1000 { .start = 0x2030, .end = 0x2030 },
1001 { .start = 0x2550, .end = 0x2550 },
1002 { .start = 0xA008, .end = 0xA00C },
1003 { .start = 0x22030, .end = 0x22030 },
0bb50de1
MR
1004 { .start = 0x22230, .end = 0x22230 },
1005 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 1006 { .start = 0x1C0030, .end = 0x1C0030 },
0bb50de1
MR
1007 { .start = 0x1C0230, .end = 0x1C0230 },
1008 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 1009 { .start = 0x1C4030, .end = 0x1C4030 },
0bb50de1
MR
1010 { .start = 0x1C4230, .end = 0x1C4230 },
1011 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 1012 { .start = 0x1C8030, .end = 0x1C8030 },
0bb50de1
MR
1013 { .start = 0x1C8230, .end = 0x1C8230 },
1014 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 1015 { .start = 0x1D0030, .end = 0x1D0030 },
0bb50de1
MR
1016 { .start = 0x1D0230, .end = 0x1D0230 },
1017 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 1018 { .start = 0x1D4030, .end = 0x1D4030 },
0bb50de1
MR
1019 { .start = 0x1D4230, .end = 0x1D4230 },
1020 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 1021 { .start = 0x1D8030, .end = 0x1D8030 },
0bb50de1
MR
1022 { .start = 0x1D8230, .end = 0x1D8230 },
1023 { .start = 0x1D8510, .end = 0x1D8550 },
a89a70a8
DCS
1024};
1025
f9d56cd6
MR
1026static const struct i915_range gen12_shadowed_regs[] = {
1027 { .start = 0x2030, .end = 0x2030 },
5798a769 1028 { .start = 0x2510, .end = 0x2550 },
f9d56cd6 1029 { .start = 0xA008, .end = 0xA00C },
5798a769
MR
1030 { .start = 0xA188, .end = 0xA188 },
1031 { .start = 0xA278, .end = 0xA278 },
1032 { .start = 0xA540, .end = 0xA56C },
1033 { .start = 0xC4C8, .end = 0xC4C8 },
1034 { .start = 0xC4D4, .end = 0xC4D4 },
1035 { .start = 0xC600, .end = 0xC600 },
f9d56cd6 1036 { .start = 0x22030, .end = 0x22030 },
5798a769 1037 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 1038 { .start = 0x1C0030, .end = 0x1C0030 },
5798a769 1039 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 1040 { .start = 0x1C4030, .end = 0x1C4030 },
5798a769 1041 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 1042 { .start = 0x1C8030, .end = 0x1C8030 },
5798a769 1043 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 1044 { .start = 0x1D0030, .end = 0x1D0030 },
5798a769 1045 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 1046 { .start = 0x1D4030, .end = 0x1D4030 },
5798a769 1047 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 1048 { .start = 0x1D8030, .end = 0x1D8030 },
5798a769 1049 { .start = 0x1D8510, .end = 0x1D8550 },
bfac1e2b 1050
5c5c40e2
MR
1051 /*
1052 * The rest of these ranges are specific to Xe_HP and beyond, but
1053 * are reserved/unused ranges on earlier gen12 platforms, so they can
1054 * be safely added to the gen12 table.
1055 */
f9d56cd6 1056 { .start = 0x1E0030, .end = 0x1E0030 },
5c5c40e2 1057 { .start = 0x1E0510, .end = 0x1E0550 },
f9d56cd6 1058 { .start = 0x1E4030, .end = 0x1E4030 },
5c5c40e2 1059 { .start = 0x1E4510, .end = 0x1E4550 },
f9d56cd6 1060 { .start = 0x1E8030, .end = 0x1E8030 },
5c5c40e2 1061 { .start = 0x1E8510, .end = 0x1E8550 },
f9d56cd6 1062 { .start = 0x1F0030, .end = 0x1F0030 },
5c5c40e2 1063 { .start = 0x1F0510, .end = 0x1F0550 },
f9d56cd6 1064 { .start = 0x1F4030, .end = 0x1F4030 },
5c5c40e2 1065 { .start = 0x1F4510, .end = 0x1F4550 },
f9d56cd6 1066 { .start = 0x1F8030, .end = 0x1F8030 },
5c5c40e2 1067 { .start = 0x1F8510, .end = 0x1F8550 },
bfac1e2b
MR
1068};
1069
c74e66d4
MR
1070static const struct i915_range dg2_shadowed_regs[] = {
1071 { .start = 0x2030, .end = 0x2030 },
1072 { .start = 0x2510, .end = 0x2550 },
1073 { .start = 0xA008, .end = 0xA00C },
1074 { .start = 0xA188, .end = 0xA188 },
1075 { .start = 0xA278, .end = 0xA278 },
1076 { .start = 0xA540, .end = 0xA56C },
1077 { .start = 0xC4C8, .end = 0xC4C8 },
1078 { .start = 0xC4E0, .end = 0xC4E0 },
1079 { .start = 0xC600, .end = 0xC600 },
1080 { .start = 0xC658, .end = 0xC658 },
1081 { .start = 0x22030, .end = 0x22030 },
1082 { .start = 0x22510, .end = 0x22550 },
1083 { .start = 0x1C0030, .end = 0x1C0030 },
1084 { .start = 0x1C0510, .end = 0x1C0550 },
1085 { .start = 0x1C4030, .end = 0x1C4030 },
1086 { .start = 0x1C4510, .end = 0x1C4550 },
1087 { .start = 0x1C8030, .end = 0x1C8030 },
1088 { .start = 0x1C8510, .end = 0x1C8550 },
1089 { .start = 0x1D0030, .end = 0x1D0030 },
1090 { .start = 0x1D0510, .end = 0x1D0550 },
1091 { .start = 0x1D4030, .end = 0x1D4030 },
1092 { .start = 0x1D4510, .end = 0x1D4550 },
1093 { .start = 0x1D8030, .end = 0x1D8030 },
1094 { .start = 0x1D8510, .end = 0x1D8550 },
1095 { .start = 0x1E0030, .end = 0x1E0030 },
1096 { .start = 0x1E0510, .end = 0x1E0550 },
1097 { .start = 0x1E4030, .end = 0x1E4030 },
1098 { .start = 0x1E4510, .end = 0x1E4550 },
1099 { .start = 0x1E8030, .end = 0x1E8030 },
1100 { .start = 0x1E8510, .end = 0x1E8550 },
1101 { .start = 0x1F0030, .end = 0x1F0030 },
1102 { .start = 0x1F0510, .end = 0x1F0550 },
1103 { .start = 0x1F4030, .end = 0x1F4030 },
1104 { .start = 0x1F4510, .end = 0x1F4550 },
1105 { .start = 0x1F8030, .end = 0x1F8030 },
1106 { .start = 0x1F8510, .end = 0x1F8550 },
cf82d9dd
MT
1107};
1108
fb289464
MR
1109static const struct i915_range pvc_shadowed_regs[] = {
1110 { .start = 0x2030, .end = 0x2030 },
1111 { .start = 0x2510, .end = 0x2550 },
1112 { .start = 0xA008, .end = 0xA00C },
1113 { .start = 0xA188, .end = 0xA188 },
1114 { .start = 0xA278, .end = 0xA278 },
1115 { .start = 0xA540, .end = 0xA56C },
1116 { .start = 0xC4C8, .end = 0xC4C8 },
1117 { .start = 0xC4E0, .end = 0xC4E0 },
1118 { .start = 0xC600, .end = 0xC600 },
1119 { .start = 0xC658, .end = 0xC658 },
1120 { .start = 0x22030, .end = 0x22030 },
1121 { .start = 0x22510, .end = 0x22550 },
1122 { .start = 0x1C0030, .end = 0x1C0030 },
1123 { .start = 0x1C0510, .end = 0x1C0550 },
1124 { .start = 0x1C4030, .end = 0x1C4030 },
1125 { .start = 0x1C4510, .end = 0x1C4550 },
1126 { .start = 0x1C8030, .end = 0x1C8030 },
1127 { .start = 0x1C8510, .end = 0x1C8550 },
1128 { .start = 0x1D0030, .end = 0x1D0030 },
1129 { .start = 0x1D0510, .end = 0x1D0550 },
1130 { .start = 0x1D4030, .end = 0x1D4030 },
1131 { .start = 0x1D4510, .end = 0x1D4550 },
1132 { .start = 0x1D8030, .end = 0x1D8030 },
1133 { .start = 0x1D8510, .end = 0x1D8550 },
1134 { .start = 0x1E0030, .end = 0x1E0030 },
1135 { .start = 0x1E0510, .end = 0x1E0550 },
1136 { .start = 0x1E4030, .end = 0x1E4030 },
1137 { .start = 0x1E4510, .end = 0x1E4550 },
1138 { .start = 0x1E8030, .end = 0x1E8030 },
1139 { .start = 0x1E8510, .end = 0x1E8550 },
1140 { .start = 0x1F0030, .end = 0x1F0030 },
1141 { .start = 0x1F0510, .end = 0x1F0550 },
1142 { .start = 0x1F4030, .end = 0x1F4030 },
1143 { .start = 0x1F4510, .end = 0x1F4550 },
1144 { .start = 0x1F8030, .end = 0x1F8030 },
1145 { .start = 0x1F8510, .end = 0x1F8550 },
1146};
1147
14f2f9bf
MR
1148static const struct i915_range mtl_shadowed_regs[] = {
1149 { .start = 0x2030, .end = 0x2030 },
1150 { .start = 0x2510, .end = 0x2550 },
1151 { .start = 0xA008, .end = 0xA00C },
1152 { .start = 0xA188, .end = 0xA188 },
1153 { .start = 0xA278, .end = 0xA278 },
1154 { .start = 0xA540, .end = 0xA56C },
1155 { .start = 0xC050, .end = 0xC050 },
1156 { .start = 0xC340, .end = 0xC340 },
1157 { .start = 0xC4C8, .end = 0xC4C8 },
1158 { .start = 0xC4E0, .end = 0xC4E0 },
1159 { .start = 0xC600, .end = 0xC600 },
1160 { .start = 0xC658, .end = 0xC658 },
1161 { .start = 0xCFD4, .end = 0xCFDC },
1162 { .start = 0x22030, .end = 0x22030 },
1163 { .start = 0x22510, .end = 0x22550 },
1164};
1165
1166static const struct i915_range xelpmp_shadowed_regs[] = {
1167 { .start = 0x1C0030, .end = 0x1C0030 },
1168 { .start = 0x1C0510, .end = 0x1C0550 },
1169 { .start = 0x1C8030, .end = 0x1C8030 },
1170 { .start = 0x1C8510, .end = 0x1C8550 },
1171 { .start = 0x1D0030, .end = 0x1D0030 },
1172 { .start = 0x1D0510, .end = 0x1D0550 },
1173 { .start = 0x38A008, .end = 0x38A00C },
1174 { .start = 0x38A188, .end = 0x38A188 },
1175 { .start = 0x38A278, .end = 0x38A278 },
1176 { .start = 0x38A540, .end = 0x38A56C },
1177 { .start = 0x38A618, .end = 0x38A618 },
1178 { .start = 0x38C050, .end = 0x38C050 },
1179 { .start = 0x38C340, .end = 0x38C340 },
1180 { .start = 0x38C4C8, .end = 0x38C4C8 },
1181 { .start = 0x38C4E0, .end = 0x38C4E4 },
1182 { .start = 0x38C600, .end = 0x38C600 },
1183 { .start = 0x38C658, .end = 0x38C658 },
1184 { .start = 0x38CFD4, .end = 0x38CFDC },
1185};
1186
f9d56cd6 1187static int mmio_range_cmp(u32 key, const struct i915_range *range)
5a659383 1188{
f9d56cd6 1189 if (key < range->start)
5a659383 1190 return -1;
f9d56cd6 1191 else if (key > range->end)
5a659383
TU
1192 return 1;
1193 else
1194 return 0;
1195}
1196
6cdbb101
MR
1197static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1198{
1199 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1200 return false;
6863b76c 1201
eefac38a
MR
1202 if (IS_GSI_REG(offset))
1203 offset += uncore->gsi_offset;
1204
6cdbb101
MR
1205 return BSEARCH(offset,
1206 uncore->shadowed_reg_table,
1207 uncore->shadowed_reg_table_entries,
1208 mmio_range_cmp);
1209}
a89a70a8 1210
ccb2acea
DCS
1211static enum forcewake_domains
1212gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1213{
1214 return FORCEWAKE_RENDER;
1215}
1216
/* Forcewake domain(s) needed to read @offset; 0 if the register is always on. */
#define __fwtable_reg_read_fw_domains(uncore, offset) \
	(NEEDS_FORCE_WAKE(offset) ? find_fw_domain(uncore, offset) : 0)

/* As above for writes, except shadowed registers never need forcewake. */
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	const u32 __offset = (offset); \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE(__offset) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }
1236
1237/*
1238 * All platforms' forcewake tables below must be sorted by offset ranges.
1239 * Furthermore, new forcewake tables added should be "watertight" and have
1240 * no gaps between ranges.
1241 *
1242 * When there are multiple consecutive ranges listed in the bspec with
1243 * the same forcewake domain, it is customary to combine them into a single
1244 * row in the tables below to keep the tables small and lookups fast.
1245 * Likewise, reserved/unused ranges may be combined with the preceding and/or
1246 * following ranges since the driver will never be making MMIO accesses in
1247 * those ranges.
1248 *
1249 * For example, if the bspec were to list:
1250 *
1251 * ...
1252 * 0x1000 - 0x1fff: GT
1253 * 0x2000 - 0x2cff: GT
1254 * 0x2d00 - 0x2fff: unused/reserved
1255 * 0x3000 - 0xffff: GT
1256 * ...
1257 *
1258 * these could all be represented by a single line in the code:
1259 *
1260 * GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1261 *
1262 * When adding new forcewake tables here, please also add them to
1263 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1264 * scanned for obvious mistakes or typos by the selftests.
1265 */
1266
1ab2b4cd
MR
1267static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1268 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1269};
6863b76c 1270
d32e8ed9
MR
1271static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1272 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1273 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1274 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1275 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1276 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1277 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1278 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1279};
1280
9fc1117c
TU
1281static const struct intel_forcewake_range __chv_fw_ranges[] = {
1282 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 1283 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1284 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 1285 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1286 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 1287 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1288 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
b0081239
TU
1289 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1290 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 1291 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
b0081239
TU
1292 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1293 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c
TU
1294 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1295 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1296 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1297 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 1298};
38fb6a40 1299
9fc1117c 1300static const struct intel_forcewake_range __gen9_fw_ranges[] = {
55e3c170 1301 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
9fc1117c
TU
1302 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1303 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1304 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
9fc1117c 1305 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1306 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
9fc1117c 1307 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1308 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
b0081239 1309 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 1310 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1311 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
9fc1117c 1312 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1313 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
b0081239 1314 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
55e3c170 1315 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
9fc1117c 1316 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1317 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
9fc1117c 1318 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
55e3c170 1319 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
b0081239 1320 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1321 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
9fc1117c 1322 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
55e3c170 1323 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
b0081239 1324 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1325 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
9fc1117c 1326 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
55e3c170 1327 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
9fc1117c 1328 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
55e3c170 1329 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
b0081239 1330 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
55e3c170 1331 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
9fc1117c
TU
1332 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1333};
6863b76c 1334
a89a70a8 1335static const struct intel_forcewake_range __gen11_fw_ranges[] = {
c4310def 1336 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
a89a70a8 1337 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1338 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
a89a70a8 1339 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1340 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
a89a70a8 1341 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1342 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
a89a70a8 1343 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1344 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
a89a70a8 1345 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1346 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
c4310def 1347 GEN_FW_RANGE(0x8800, 0x8bff, 0),
a89a70a8 1348 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1349 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
c4310def
RS
1350 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1351 GEN_FW_RANGE(0x9560, 0x95ff, 0),
55e3c170 1352 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
a89a70a8 1353 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1354 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
c9f8d187 1355 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1356 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
c9f8d187 1357 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
55e3c170 1358 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
c4310def 1359 GEN_FW_RANGE(0x24000, 0x2407f, 0),
55e3c170 1360 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
c4310def 1361 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
55e3c170 1362 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
c4310def 1363 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
55e3c170 1364 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
a89a70a8
DCS
1365 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1366 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
c4310def
RS
1367 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1368 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
a89a70a8 1369 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
c4310def 1370 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
a89a70a8
DCS
1371};
1372
cf82d9dd 1373static const struct intel_forcewake_range __gen12_fw_ranges[] = {
92f5df0d
MR
1374 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1375 0x0 - 0xaff: reserved
1376 0xb00 - 0x1fff: always on */
cf82d9dd 1377 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
92f5df0d
MR
1378 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1379 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1380 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
cf82d9dd 1381 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
92f5df0d
MR
1382 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1383 0x4000 - 0x48ff: gt
1384 0x4900 - 0x51ff: reserved */
1385 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1386 0x5200 - 0x53ff: render
1387 0x5400 - 0x54ff: reserved
1388 0x5500 - 0x7fff: render */
55e3c170 1389 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
cf82d9dd 1390 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
92f5df0d
MR
1391 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1392 0x8160 - 0x817f: reserved
1393 0x8180 - 0x81ff: always on */
1394 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
cf82d9dd 1395 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
92f5df0d
MR
1396 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1397 0x8500 - 0x87ff: gt
1398 0x8800 - 0x8fff: reserved
1399 0x9000 - 0x947f: gt
1400 0x9480 - 0x94cf: reserved */
1401 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1402 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1403 0x9560 - 0x95ff: always on
1404 0x9600 - 0x97ff: reserved */
55e3c170 1405 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
92f5df0d
MR
1406 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1407 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1408 0xb400 - 0xbf7f: gt
1409 0xb480 - 0xbfff: reserved
1410 0xc000 - 0xcfff: gt */
1411 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1412 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1413 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1414 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1415 0xdc00 - 0xddff: render
1416 0xde00 - 0xde7f: reserved
1417 0xde80 - 0xe8ff: render
1418 0xe900 - 0xefff: reserved */
1419 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1420 0xf000 - 0xffff: gt
1421 0x10000 - 0x147ff: reserved */
1422 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1423 0x14800 - 0x14fff: render
1424 0x15000 - 0x16dff: reserved
1425 0x16e00 - 0x1bfff: render
1426 0x1c000 - 0x1ffff: reserved */
1427 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1428 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1429 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1430 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1431 0x24000 - 0x2407f: always on
1432 0x24080 - 0x2417f: reserved */
1433 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1434 0x24180 - 0x241ff: gt
1435 0x24200 - 0x249ff: reserved */
1436 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1437 0x24a00 - 0x24a7f: render
1438 0x24a80 - 0x251ff: reserved */
1439 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1440 0x25200 - 0x252ff: gt
1441 0x25300 - 0x255ff: reserved */
1442 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1443 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1444 0x25680 - 0x256ff: VD2
1445 0x25700 - 0x259ff: reserved */
1446 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1447 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1448 0x25a80 - 0x25aff: VD2
1449 0x25b00 - 0x2ffff: reserved */
1450 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
cf82d9dd 1451 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
92f5df0d
MR
1452 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1453 0x1c0000 - 0x1c2bff: VD0
1454 0x1c2c00 - 0x1c2cff: reserved
1455 0x1c2d00 - 0x1c2dff: VD0
1456 0x1c2e00 - 0x1c3eff: reserved
1457 0x1c3f00 - 0x1c3fff: VD0 */
1458 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1459 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1460 0x1c8000 - 0x1ca0ff: VE0
1461 0x1ca100 - 0x1cbeff: reserved
1462 0x1cbf00 - 0x1cbfff: VE0 */
1463 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1464 0x1cc000 - 0x1ccfff: VD0
1465 0x1cd000 - 0x1cffff: reserved */
1466 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1467 0x1d0000 - 0x1d2bff: VD2
1468 0x1d2c00 - 0x1d2cff: reserved
1469 0x1d2d00 - 0x1d2dff: VD2
1470 0x1d2e00 - 0x1d3eff: reserved
1471 0x1d3f00 - 0x1d3fff: VD2 */
cf82d9dd
MT
1472};
1473
/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 */
#define XEHP_FWRANGES(FW_RANGE_D800) \
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
	    0x0 -  0xaff: reserved \
	  0xb00 - 0x1fff: always on */ \
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
	  0x4b00 - 0x4fff: reserved \
	  0x5000 - 0x51ff: always on */ \
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
	  0x8160 - 0x817f: reserved \
	  0x8180 - 0x81ff: always on */ \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
	  0x8500 - 0x87ff: gt \
	  0x8800 - 0x8c7f: reserved \
	  0x8c80 - 0x8cff: gt (DG2 only) */ \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
	  0x8d00 - 0x8dff: render (DG2 only) \
	  0x8e00 - 0x8fff: reserved */ \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
	  0x9000 - 0x947f: gt \
	  0x9480 - 0x94cf: reserved */ \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
	  0x9560 - 0x95ff: always on \
	  0x9600 - 0x967f: reserved */ \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
	  0x9680 - 0x96ff: render (DG2 only) \
	  0x9700 - 0x97ff: reserved */ \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
	  0x9800 - 0xb4ff: gt \
	  0xb500 - 0xbfff: reserved \
	  0xc000 - 0xcfff: gt */ \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
	  0xdd00 - 0xddff: gt \
	  0xde00 - 0xde7f: reserved */ \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
	  0xde80 - 0xdfff: render \
	  0xe000 - 0xe0ff: reserved \
	  0xe100 - 0xe8ff: render */ \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
	  0xe900 - 0xe9ff: gt \
	  0xea00 - 0xefff: reserved \
	  0xf000 - 0xffff: gt */ \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
	  0x10000 - 0x11fff: reserved \
	  0x12000 - 0x127ff: always on \
	  0x12800 - 0x12fff: reserved */ \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
	  0x13200 - 0x133ff: VD2 (DG2 only) \
	  0x13400 - 0x13fff: reserved */ \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
	  0x15000 - 0x15fff: gt (DG2 only) \
	  0x16000 - 0x16dff: reserved */ \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
	  0x20000 - 0x20fff: VD0 (XEHPSDV only) \
	  0x21000 - 0x21fff: reserved */ \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
	  0x24000 - 0x2407f: always on \
	  0x24080 - 0x2417f: reserved */ \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
	  0x24180 - 0x241ff: gt \
	  0x24200 - 0x249ff: reserved */ \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
	  0x24a00 - 0x24a7f: render \
	  0x24a80 - 0x251ff: reserved */ \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
	  0x25200 - 0x252ff: gt \
	  0x25300 - 0x25fff: reserved */ \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
	  0x26000 - 0x27fff: render \
	  0x28000 - 0x29fff: reserved \
	  0x2a000 - 0x2ffff: undocumented */ \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
	  0x1c0000 - 0x1c2bff: VD0 \
	  0x1c2c00 - 0x1c2cff: reserved \
	  0x1c2d00 - 0x1c2dff: VD0 \
	  0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
	  0x1c3f00 - 0x1c3fff: VD0 */ \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
	  0x1c4000 - 0x1c6bff: VD1 \
	  0x1c6c00 - 0x1c6cff: reserved \
	  0x1c6d00 - 0x1c6dff: VD1 \
	  0x1c6e00 - 0x1c7fff: reserved */ \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
	  0x1c8000 - 0x1ca0ff: VE0 \
	  0x1ca100 - 0x1cbfff: reserved */ \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
	  0x1d0000 - 0x1d2bff: VD2 \
	  0x1d2c00 - 0x1d2cff: reserved \
	  0x1d2d00 - 0x1d2dff: VD2 \
	  0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
	  0x1d3e00 - 0x1d3eff: reserved \
	  0x1d3f00 - 0x1d3fff: VD2 */ \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
	  0x1d4000 - 0x1d6bff: VD3 \
	  0x1d6c00 - 0x1d6cff: reserved \
	  0x1d6d00 - 0x1d6dff: VD3 \
	  0x1d6e00 - 0x1d7fff: reserved */ \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
	  0x1d8000 - 0x1da0ff: VE1 \
	  0x1da100 - 0x1dffff: reserved */ \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
	  0x1e0000 - 0x1e2bff: VD4 \
	  0x1e2c00 - 0x1e2cff: reserved \
	  0x1e2d00 - 0x1e2dff: VD4 \
	  0x1e2e00 - 0x1e3eff: reserved \
	  0x1e3f00 - 0x1e3fff: VD4 */ \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
	  0x1e4000 - 0x1e6bff: VD5 \
	  0x1e6c00 - 0x1e6cff: reserved \
	  0x1e6d00 - 0x1e6dff: VD5 \
	  0x1e6e00 - 0x1e7fff: reserved */ \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
	  0x1e8000 - 0x1ea0ff: VE2 \
	  0x1ea100 - 0x1effff: reserved */ \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
	  0x1f0000 - 0x1f2bff: VD6 \
	  0x1f2c00 - 0x1f2cff: reserved \
	  0x1f2d00 - 0x1f2dff: VD6 \
	  0x1f2e00 - 0x1f3eff: reserved \
	  0x1f3f00 - 0x1f3fff: VD6 */ \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
	  0x1f4000 - 0x1f6bff: VD7 \
	  0x1f6c00 - 0x1f6cff: reserved \
	  0x1f6d00 - 0x1f6dff: VD7 \
	  0x1f6e00 - 0x1f7fff: reserved */ \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
e0531636
MR
1629
1630static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1631 XEHP_FWRANGES(FORCEWAKE_GT)
1632};
1633
1634static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1635 XEHP_FWRANGES(FORCEWAKE_RENDER)
bfac1e2b
MR
1636};
1637
fb289464
MR
/*
 * Ponte Vecchio forcewake table: maps MMIO offset ranges to the forcewake
 * domain that must be awake before accessing registers in that range.
 * Entries must be sorted by offset; sub-range comments document the finer
 * hardware layout folded into each table entry.
 */
static const struct intel_forcewake_range __pvc_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
		0x4000 - 0x4aff: gt
		0x4b00 - 0x4fff: reserved
		0x5000 - 0x51ff: gt
		0x5200 - 0x52ff: reserved
		0x5300 - 0x53ff: gt
		0x5400 - 0x7fff: reserved
		0x8000 - 0x813f: gt */
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x82ff: gt
		0x8300 - 0x84ff: reserved
		0x8500 - 0x887f: gt
		0x8880 - 0x8a7f: reserved
		0x8a80 - 0x8aff: gt
		0x8b00 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdeff: render
		0xdf00 - 0xe1ff: reserved
		0xe200 - 0xe7ff: render
		0xe800 - 0xe8ff: reserved */
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
		0xe900 - 0xe9ff: gt
		0xea00 - 0xebff: reserved
		0xec00 - 0xffff: gt
		0x10000 - 0x11fff: reserved */
	GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
		0x12000 - 0x127ff: always on
		0x12800 - 0x12fff: reserved */
	GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
		0x13000 - 0x135ff: gt
		0x13600 - 0x147ff: reserved
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1ffff: render
		0x20000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x251ff: reserved
		0x25200 - 0x252ff: gt
		0x25300 - 0x25fff: reserved */
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
		0x26000 - 0x27fff: render
		0x28000 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
		0x1c4000 - 0x1c6aff: VD1
		0x1c6b00 - 0x1c7eff: reserved
		0x1c7f00 - 0x1c7fff: VD1
		0x1c8000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2aff: VD2
		0x1d2b00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2
		0x1d4000 - 0x23ffff: reserved */
	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
};
1736
14f2f9bf
MR
/*
 * Meteor Lake (primary GT) forcewake table: maps MMIO offset ranges to the
 * forcewake domain that must be awake before accessing registers in that
 * range.  Entries must be sorted by offset.
 */
static const struct intel_forcewake_range __mtl_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: render
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
		0x8140 - 0x815f: render
		0x8160 - 0x817f: reserved */
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x87ff: gt
		0x8800 - 0x8dff: reserved
		0x8e00 - 0x8f7f: gt
		0x8f80 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
		0xd000 - 0xd3ff: always on
		0xd400 - 0xd7ff: reserved */
	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdfff: render
		0xe000 - 0xe0ff: reserved
		0xe100 - 0xe8ff: render */
	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
		0xea00 - 0x11fff: reserved
		0x12000 - 0x127ff: always on
		0x12800 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1bfff: render
		0x1c000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
};
1805
/*
 * Note that the register ranges here are the final offsets after
 * translation of the GSI block to the 0x380000 offset.
 *
 * NOTE: There are a couple MCR ranges near the bottom of this table
 * that need to power up either VD0 or VD2 depending on which replicated
 * instance of the register we're trying to access. Our forcewake logic
 * at the moment doesn't have a good way to take steering into consideration,
 * and the driver doesn't even access any registers in those ranges today,
 * so for now we just mark those ranges as FORCEWAKE_ALL. That will ensure
 * proper operation if we do start using the ranges in the future, and we
 * can determine at that time whether it's worth adding extra complexity to
 * the forcewake handling to take steering into consideration.
 */
static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
		0x116000 - 0x117fff: gsc
		0x118000 - 0x119fff: reserved
		0x11a000 - 0x11efff: gsc
		0x11f000 - 0x11ffff: reserved */
	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c3dff: VD0
		0x1c3e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0
		0x1c4000 - 0x1c7fff: reserved */
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbfff: reserved */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1cdfff: VD0
		0x1ce000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d3dff: VD2
		0x1d3e00 - 0x1d3eff: reserved
		0x1d4000 - 0x1d7fff: VD2 */
	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
		0x1da100 - 0x23ffff: reserved
		0x240000 - 0x37ffff: non-GT range
		0x380000 - 0x380aff: reserved */
	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
		0x381000 - 0x381fff: gt
		0x382000 - 0x383fff: reserved
		0x384000 - 0x384aff: gt
		0x384b00 - 0x3851ff: reserved
		0x385200 - 0x3871ff: gt
		0x387200 - 0x387fff: reserved
		0x388000 - 0x38813f: gt
		0x388140 - 0x38817f: reserved */
	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
		0x388180 - 0x3881ff: always on
		0x388200 - 0x3882ff: reserved */
	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
		0x388300 - 0x38887f: gt
		0x388880 - 0x388fff: reserved
		0x389000 - 0x38947f: gt
		0x389480 - 0x38955f: reserved */
	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
		0x389560 - 0x3895ff: always on
		0x389600 - 0x389fff: reserved */
	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
		0x38a000 - 0x38afff: gt
		0x38b000 - 0x38bfff: reserved
		0x38c000 - 0x38cfff: gt */
	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
		0x38d120 - 0x38dfff: gt
		0x38e000 - 0x38efff: reserved
		0x38f000 - 0x38ffff: gt
		0x390000 - 0x391fff: reserved */
	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
		0x392000 - 0x3927ff: always on
		0x392800 - 0x392fff: reserved */
	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
		0x393500 - 0x393bff: reserved
		0x393c00 - 0x393c7f: always on */
	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
};
1892
/* Wake Ironlake from RC6 with a harmless register write before real MMIO. */
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}
1901
/*
 * WARN (once per remaining mmio_debug budget) if the access to @reg left the
 * hardware's unclaimed-mmio flag set.  Caller holds uncore->debug->lock.
 */
static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}
1915
4b276ed3
LDM
/*
 * Debug-log (no WARN, no budget decrement) if an unclaimed access was already
 * pending *before* we touch @reg — i.e. some earlier access is the culprit.
 */
static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}
1927
/*
 * Begin unclaimed-mmio bracketing around a register access.  Returns true if
 * debugging is active, in which case uncore->debug->lock is held on return
 * and the caller MUST pair this with unclaimed_reg_debug_footer() to drop it.
 * Returns false (no lock taken) when mmio_debug is off or unavailable.
 */
static inline bool __must_check
unclaimed_reg_debug_header(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{
	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
		return false;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	spin_lock(&uncore->debug->lock);
	__unclaimed_previous_reg_debug(uncore, reg, read);

	return true;
}
1943
/*
 * End unclaimed-mmio bracketing: check whether the just-completed access to
 * @reg was unclaimed, then release uncore->debug->lock taken by the matching
 * unclaimed_reg_debug_header().  Only call when the header returned true.
 */
static inline void
unclaimed_reg_debug_footer(struct intel_uncore *uncore,
			   const i915_reg_t reg, const bool read)
{
	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	__unclaimed_reg_debug(uncore, reg, read);
	spin_unlock(&uncore->debug->lock);
}
1954
0e65ce24
CW
/*
 * Register read accessors for a virtualized GPU (vGPU): raw read plus
 * tracing, no forcewake handling — the hypervisor owns power management.
 * Instantiated for 8/16/32/64-bit widths.
 */
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
1966
/*
 * Pre-gen6 register read accessors.  No forcewake exists on these platforms;
 * the only requirement is that a runtime-PM wakelock is held.  gen5 (ILK)
 * additionally needs the RC6 dummy-write workaround before each read.
 */
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
2006
/*
 * Common prologue/epilogue for gen6+ read accessors: take uncore->lock with
 * IRQs disabled for the duration of the access, and bracket it with the
 * optional unclaimed-mmio debug checks.  The footer is the exact mirror of
 * the header (debug footer only if the header armed it, then unlock, trace,
 * return).
 */
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	bool unclaimed_reg_debug; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)

#define GEN6_READ_FOOTER \
	if (unclaimed_reg_debug) \
		unclaimed_reg_debug_footer(uncore, reg, true); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
2022
/*
 * Slow path of __force_wake_auto(): arm the auto-release timer on each
 * requested domain, then actually assert forcewake on them.  @fw_domains
 * must be a subset of the domains this uncore implements.
 */
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}
2036
f568eeee 2037static inline void __force_wake_auto(struct intel_uncore *uncore,
c521b0c8
TU
2038 enum forcewake_domains fw_domains)
2039{
77adbd8f 2040 GEM_BUG_ON(!fw_domains);
b2cff0db 2041
003342a5 2042 /* Turn on all requested but inactive supported forcewake domains. */
f568eeee
DCS
2043 fw_domains &= uncore->fw_domains;
2044 fw_domains &= ~uncore->fw_domains_active;
b2cff0db 2045
c521b0c8 2046 if (fw_domains)
f568eeee 2047 ___force_wake_auto(uncore, fw_domains);
b2cff0db
CW
2048}
2049
/*
 * Table-driven gen6+ read accessors: look up which forcewake domains the
 * target offset requires in uncore->fw_domains_table, auto-wake them, then
 * perform the raw read under uncore->lock (via GEN6_READ_HEADER/FOOTER).
 */
#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

/* Exported-via-vfunc query: which domains would a read of @reg require? */
static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
5d738795 2076
/*
 * Pre-gen6 register write accessors.  No forcewake and no FIFO to manage;
 * just trace, assert the runtime-PM wakelock, and write.  gen5 (ILK) needs
 * the RC6 dummy-write workaround before each write.
 */
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
2112
/*
 * gen6+ write accessors.  The header/footer mirror the read variants: take
 * uncore->lock with IRQs disabled and bracket the access with the optional
 * unclaimed-mmio debug checks.  gen6 proper throttles on the GT FIFO before
 * writing; the table-driven variant instead auto-wakes the forcewake
 * domains the target offset requires.
 */
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	bool unclaimed_reg_debug; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)

#define GEN6_WRITE_FOOTER \
	if (unclaimed_reg_debug) \
		unclaimed_reg_debug_footer(uncore, reg, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

/* Exported-via-vfunc query: which domains would a write to @reg require? */
static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
907b28c5 2165
0e65ce24
CW
/*
 * Register write accessors for a virtualized GPU (vGPU): trace plus raw
 * write, no forcewake handling.  Instantiated for 8/16/32-bit widths.
 */
#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
2175
/*
 * Helpers to wire a named accessor family (e.g. "fwtable", "gen6") into
 * uncore->funcs.  The RAW variants install only the mmio read/write
 * callbacks; the non-RAW variants additionally install the matching
 * *_reg_{read,write}_fw_domains query.
 */
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)
05a2fb15 2202
f833cdb0
DCS
/*
 * Allocate and register one forcewake domain on @uncore.
 *
 * @domain_id: which domain slot to fill (must be empty)
 * @reg_set:   MMIO register used to request/release forcewake
 * @reg_ack:   MMIO register polled for the hardware's acknowledgment
 *
 * Returns 0 on success or -ENOMEM (also used for fault injection during
 * probe).  On success the domain is reset, its auto-release hrtimer is set
 * up, and it is published in uncore->fw_domain[]/uncore->fw_domains.
 */
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	/* gsi_offset accounts for the GSI block relocation on media GTs */
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;

	d->id = domain_id;

	/* The domain mask encoding must stay a direct shift of the id. */
	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}
2260
/*
 * Tear down and free one forcewake domain.  Safe to call for a slot that
 * was never initialized (no-op).  Warns if the domain is still held or its
 * auto-release timer was still pending.
 */
static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}
26376a7e 2277
f833cdb0
DCS
2278static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2279{
2280 struct intel_uncore_forcewake_domain *d;
2281 int tmp;
2282
2283 for_each_fw_domain(d, uncore, tmp)
2284 fw_domain_fini(uncore, d->id);
26376a7e
OM
2285}
2286
5716c8c6
DA
/*
 * Forcewake "get" strategy vtables, selected per platform in
 * intel_uncore_fw_domains_init(): with ack-clear fallback (gen9+), plain
 * set-and-wait, or the gen6/7 thread-status variant.
 */
static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};
2298
/*
 * Discover and register the forcewake domains for this platform/GT and pick
 * the matching "get" strategy.  Returns 0 on success or a negative errno; on
 * failure any partially-initialized domains are torn down again.
 *
 * Note the local fw_domain_init() wrapper: once one __fw_domain_init() call
 * fails (ret != 0) all subsequent calls become no-ops, so each branch can
 * chain initializations without per-call error checking.
 */
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		intel_engine_mask_t emask;
		int i;

		/* we'll prune the domains of missing engines later */
		emask = uncore->gt->info.engine_mask;

		uncore->fw_get_funcs = &uncore_get_fallback;
		/* MTL (12.70+) moved the GT ack register */
		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_MTL);
		else
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_GEN9);

		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_RENDER_GEN9,
				       FORCEWAKE_ACK_RENDER_GEN9);

		/* One media domain per present video decode/enhance engine */
		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}

		if (uncore->gt->type == GT_MEDIA)
			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		/* Probe whether MT forcewake actually works on this part */
		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}
2430
/* Install a platform's forcewake range table on @uncore. */
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

/* Install a platform's shadowed-register table on @uncore. */
#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}
2443
264ec1a8
HG
/*
 * IOSF-MBI notifier: grab all forcewake domains for the duration of a PMIC
 * i2c bus access so no forcewake ack request has to go through the punit
 * (which would need that same, now-busy, bus) while the access is ongoing.
 */
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access to the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
2476
/*
 * drmm-managed release action: undo the ioremap() performed by
 * intel_uncore_setup_mmio(). @regs carries the __iomem pointer cast to
 * plain void * for the drmm action signature.
 */
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
	iounmap((void __iomem *)regs);
}
2481
bec68cc9 2482int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
25286aac 2483{
01385758 2484 struct drm_i915_private *i915 = uncore->i915;
25286aac
DCS
2485 int mmio_size;
2486
25286aac
DCS
2487 /*
2488 * Before gen4, the registers and the GTT are behind different BARs.
2489 * However, from gen4 onwards, the registers and the GTT are shared
2490 * in the same BAR, so we want to restrict this ioremap from
2491 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
2492 * the register BAR remains the same size for all the earlier
2493 * generations up to Ironlake.
da30390b
MR
2494 * For dgfx chips register range is expanded to 4MB, and this larger
2495 * range is also used for integrated gpus beginning with Meteor Lake.
25286aac 2496 */
da30390b 2497 if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
eafeb204 2498 mmio_size = 4 * 1024 * 1024;
da30390b 2499 else if (GRAPHICS_VER(i915) >= 5)
25286aac 2500 mmio_size = 2 * 1024 * 1024;
da30390b
MR
2501 else
2502 mmio_size = 512 * 1024;
eafeb204 2503
bec68cc9 2504 uncore->regs = ioremap(phys_addr, mmio_size);
25286aac 2505 if (uncore->regs == NULL) {
d0208cfa 2506 drm_err(&i915->drm, "failed to map registers\n");
25286aac
DCS
2507 return -EIO;
2508 }
2509
70994bec
JN
2510 return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2511 (void __force *)uncore->regs);
25286aac
DCS
2512}
2513
01385758 2514void intel_uncore_init_early(struct intel_uncore *uncore,
030def2c 2515 struct intel_gt *gt)
6cbe8830
DCS
2516{
2517 spin_lock_init(&uncore->lock);
030def2c
MW
2518 uncore->i915 = gt->i915;
2519 uncore->gt = gt;
2520 uncore->rpm = &gt->i915->runtime_pm;
6cbe8830 2521}
25286aac 2522
2e81bc61 2523static void uncore_raw_init(struct intel_uncore *uncore)
f9b3927a 2524{
2e81bc61 2525 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
25286aac 2526
0e65ce24
CW
2527 if (intel_vgpu_active(uncore->i915)) {
2528 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2529 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
651e7d48 2530 } else if (GRAPHICS_VER(uncore->i915) == 5) {
2e81bc61
DCS
2531 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2532 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2533 } else {
2534 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2535 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2536 }
2537}
f7de5027 2538
14f2f9bf
MR
2539static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2540{
2541 struct drm_i915_private *i915 = uncore->i915;
2542
2543 if (MEDIA_VER(i915) >= 13) {
2544 ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2545 ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2546 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2547 } else {
2548 MISSING_CASE(MEDIA_VER(i915));
2549 return -ENODEV;
2550 }
2551
2552 return 0;
2553}
2554
f833cdb0 2555static int uncore_forcewake_init(struct intel_uncore *uncore)
2e81bc61
DCS
2556{
2557 struct drm_i915_private *i915 = uncore->i915;
f833cdb0 2558 int ret;
cf9d2890 2559
2e81bc61 2560 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
5a0ba777 2561
f833cdb0
DCS
2562 ret = intel_uncore_fw_domains_init(uncore);
2563 if (ret)
2564 return ret;
2e81bc61 2565 forcewake_early_sanitize(uncore, 0);
75714940 2566
54fc4f13
MR
2567 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2568
14f2f9bf
MR
2569 if (uncore->gt->type == GT_MEDIA)
2570 return uncore_media_forcewake_init(uncore);
2571
2572 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2573 ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2574 ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2575 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2576 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
fb289464
MR
2577 ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
2578 ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
2579 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2580 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
e0531636 2581 ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
c74e66d4 2582 ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
aef02736 2583 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
e0531636 2584 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
bfac1e2b 2585 ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
6cdbb101 2586 ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
aef02736 2587 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b 2588 } else if (GRAPHICS_VER(i915) >= 12) {
cf82d9dd 2589 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
6cdbb101 2590 ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
aef02736 2591 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2592 } else if (GRAPHICS_VER(i915) == 11) {
2593 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
6cdbb101 2594 ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
aef02736 2595 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2596 } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2597 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
6cdbb101 2598 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
bfac1e2b 2599 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2600 } else if (IS_CHERRYVIEW(i915)) {
2601 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
6cdbb101 2602 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
bfac1e2b 2603 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b 2604 } else if (GRAPHICS_VER(i915) == 8) {
1ab2b4cd 2605 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
6cdbb101 2606 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
09b2a597 2607 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2608 } else if (IS_VALLEYVIEW(i915)) {
2609 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2610 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
bfac1e2b 2611 } else if (IS_GRAPHICS_VER(i915, 6, 7)) {
1ab2b4cd 2612 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
bfac1e2b 2613 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
3967018e 2614 }
ed493883 2615
2e81bc61
DCS
2616 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2617 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
f833cdb0
DCS
2618
2619 return 0;
2e81bc61
DCS
2620}
2621
de414973
MR
2622static int sanity_check_mmio_access(struct intel_uncore *uncore)
2623{
2624 struct drm_i915_private *i915 = uncore->i915;
2625
2626 if (GRAPHICS_VER(i915) < 8)
2627 return 0;
2628
2629 /*
2630 * Sanitycheck that MMIO access to the device is working properly. If
2631 * the CPU is unable to communcate with a PCI device, BAR reads will
2632 * return 0xFFFFFFFF. Let's make sure the device isn't in this state
2633 * before we start trying to access registers.
2634 *
2635 * We use the primary GT's forcewake register as our guinea pig since
2636 * it's been around since HSW and it's a masked register so the upper
2637 * 16 bits can never read back as 1's if device access is operating
2638 * properly.
2639 *
2640 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2641 * recovers, then give up.
2642 */
2643#define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2644 if (wait_for(COND, 2000) == -ETIMEDOUT) {
2645 drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2646 return -EIO;
2647 }
2648
2649 return 0;
2650}
2651
/*
 * Main MMIO-side initialization for @uncore: sanity-check that the device
 * responds to MMIO, verify local memory was initialized by firmware on
 * discrete parts, then install either the raw or the forcewake-aware
 * accessors and set the unclaimed-register detection flags.
 * Returns 0 on success or a negative errno.
 */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = sanity_check_mmio_access(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	/* Forcewake only applies on gen6+ and not under a vgpu hypervisor. */
	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw*/
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}
2704
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	/* Per-engine (VDBOX/VEBOX) forcewake domains only exist on gen11+. */
	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	/* The GSC domain is only useful while the GSC engine is present. */
	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
}
2759
/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of i915 is bound to the device it will do a full
 * re-init anyway.
 */
static void driver_initiated_flr(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	const unsigned int flr_timeout_ms = 3000; /* specs recommend a 3s wait */
	int ret;

	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens
	 */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm,
			"Failed to wait for Driver-FLR bit to clear! %d\n",
			ret);
		return;
	}
	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
					 DRIVERFLR, 0,
					 flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
					 flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
2823
/* Called via drm-managed action */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
	struct intel_uncore *uncore = data;

	if (intel_uncore_has_forcewake(uncore)) {
		/*
		 * The punit lock is held across the _unlocked notifier
		 * unregister and the forcewake teardown, then released.
		 */
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	/* Last action before releasing the HW: see driver_initiated_flr(). */
	if (intel_uncore_needs_flr_on_fini(uncore))
		driver_initiated_flr(uncore);
}
2841
1758b90e 2842/**
1d1a9774 2843 * __intel_wait_for_register_fw - wait until register matches expected state
d2d551c0 2844 * @uncore: the struct intel_uncore
1758b90e
CW
2845 * @reg: the register to read
2846 * @mask: mask to apply to register value
2847 * @value: expected value
1d1a9774
MW
2848 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2849 * @slow_timeout_ms: slow timeout in millisecond
2850 * @out_value: optional placeholder to hold registry value
1758b90e
CW
2851 *
2852 * This routine waits until the target register @reg contains the expected
3d466cd6
DV
2853 * @value after applying the @mask, i.e. it waits until ::
2854 *
669f3f2b 2855 * (intel_uncore_read_fw(uncore, reg) & mask) == value
3d466cd6 2856 *
1d1a9774 2857 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
6976e74b 2858 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
84d84cb7 2859 * must be not larger than 20,0000 microseconds.
1758b90e
CW
2860 *
2861 * Note that this routine assumes the caller holds forcewake asserted, it is
2862 * not suitable for very long waits. See intel_wait_for_register() if you
2863 * wish to wait without holding forcewake for the duration (i.e. you expect
2864 * the wait to be slow).
2865 *
e4661f14 2866 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
1758b90e 2867 */
d2d551c0 2868int __intel_wait_for_register_fw(struct intel_uncore *uncore,
1d1a9774 2869 i915_reg_t reg,
3fc7d86b
MW
2870 u32 mask,
2871 u32 value,
2872 unsigned int fast_timeout_us,
2873 unsigned int slow_timeout_ms,
1d1a9774 2874 u32 *out_value)
1758b90e 2875{
b79ffa91 2876 u32 reg_value = 0;
d2d551c0 2877#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
1d1a9774
MW
2878 int ret;
2879
6976e74b 2880 /* Catch any overuse of this function */
84d84cb7
CW
2881 might_sleep_if(slow_timeout_ms);
2882 GEM_BUG_ON(fast_timeout_us > 20000);
b79ffa91 2883 GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
6976e74b 2884
84d84cb7
CW
2885 ret = -ETIMEDOUT;
2886 if (fast_timeout_us && fast_timeout_us <= 20000)
1d1a9774 2887 ret = _wait_for_atomic(done, fast_timeout_us, 0);
ff26ffa8 2888 if (ret && slow_timeout_ms)
1d1a9774 2889 ret = wait_for(done, slow_timeout_ms);
84d84cb7 2890
1d1a9774
MW
2891 if (out_value)
2892 *out_value = reg_value;
84d84cb7 2893
1758b90e
CW
2894 return ret;
2895#undef done
2896}
2897
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold registry value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	/* Fast phase: take forcewake under the uncore lock and busy-wait. */
	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	/* Slow phase: sleep-wait without holding forcewake for the duration. */
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
2956
2cf7bf6f 2957bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
907b28c5 2958{
0a9b2630
DCS
2959 bool ret;
2960
639e30ee
MR
2961 if (!uncore->debug)
2962 return false;
2963
0a9b2630
DCS
2964 spin_lock_irq(&uncore->debug->lock);
2965 ret = check_for_unclaimed_mmio(uncore);
2966 spin_unlock_irq(&uncore->debug->lock);
2967
2968 return ret;
907b28c5 2969}
75714940 2970
bc3b9346 2971bool
2cf7bf6f 2972intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
75714940 2973{
a167b1e1
CW
2974 bool ret = false;
2975
639e30ee
MR
2976 if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2977 return false;
2978
0a9b2630 2979 spin_lock_irq(&uncore->debug->lock);
a167b1e1 2980
0a9b2630 2981 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
a167b1e1 2982 goto out;
75714940 2983
0a9b2630 2984 if (unlikely(check_for_unclaimed_mmio(uncore))) {
8a25c4be 2985 if (!uncore->i915->params.mmio_debug) {
d0208cfa
WK
2986 drm_dbg(&uncore->i915->drm,
2987 "Unclaimed register detected, "
2988 "enabling oneshot unclaimed register reporting. "
2989 "Please use i915.mmio_debug=N for more information.\n");
8a25c4be 2990 uncore->i915->params.mmio_debug++;
7ef4ac6e 2991 }
0a9b2630 2992 uncore->debug->unclaimed_mmio_check--;
a167b1e1 2993 ret = true;
75714940 2994 }
bc3b9346 2995
a167b1e1 2996out:
0a9b2630 2997 spin_unlock_irq(&uncore->debug->lock);
a167b1e1
CW
2998
2999 return ret;
75714940 3000}
3756685a 3001
3756685a
TU
3002/**
3003 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
3004 * a register
4319382e 3005 * @uncore: pointer to struct intel_uncore
3756685a
TU
3006 * @reg: register in question
3007 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
3008 *
3009 * Returns a set of forcewake domains required to be taken with for example
3010 * intel_uncore_forcewake_get for the specified register to be accessible in the
3011 * specified mode (read, write or read/write) with raw mmio accessors.
3012 *
3013 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
3014 * callers to do FIFO management on their own or risk losing writes.
3015 */
3016enum forcewake_domains
4319382e 3017intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
3756685a
TU
3018 i915_reg_t reg, unsigned int op)
3019{
3020 enum forcewake_domains fw_domains = 0;
3021
a9f236d1 3022 drm_WARN_ON(&uncore->i915->drm, !op);
3756685a 3023
4319382e 3024 if (!intel_uncore_has_forcewake(uncore))
895833bd
TU
3025 return 0;
3026
3756685a 3027 if (op & FW_REG_READ)
ccb2acea 3028 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
3756685a
TU
3029
3030 if (op & FW_REG_WRITE)
ccb2acea
DCS
3031 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
3032
a9f236d1 3033 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
3756685a
TU
3034
3035 return fw_domains;
3036}
26e7a2a1
CW
3037
3038#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0757ac8f 3039#include "selftests/mock_uncore.c"
26e7a2a1
CW
3040#include "selftests/intel_uncore.c"
3041#endif