Merge tag 'probes-fixes-v6.16-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / drivers / gpu / drm / i915 / intel_uncore.c
CommitLineData
907b28c5
CW
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
9ebb80e8 24#include <drm/drm_managed.h>
696173b0 25#include <linux/pm_runtime.h>
696173b0 26
a857add7 27#include "gt/intel_gt.h"
202b1f4c 28#include "gt/intel_engine_regs.h"
0d6419e9 29#include "gt/intel_gt_regs.h"
bfac1e2b 30
907b28c5 31#include "i915_drv.h"
f0e204e0 32#include "i915_iosf_mbi.h"
801543b2 33#include "i915_reg.h"
cf9d2890 34#include "i915_vgpu.h"
17d70726 35#include "intel_uncore_trace.h"
6daccb0b 36
/* Max time (ms) to wait for a forcewake ack before declaring a timeout */
#define FORCEWAKE_ACK_TIMEOUT_MS 50
/* Max time (ms) to wait for free GT FIFO entries before giving up */
#define GT_FIFO_TIMEOUT_MS	 10
907b28c5 39
/* Resolve a drm_device back to its embedded intel_uncore. */
struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
	return &to_i915(drm)->uncore;
}

/* Raw posting read: force the MMIO read, discard the value. */
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
/*
 * Grab the requested forcewake domains via the platform-specific
 * vfunc table installed at init time (normal, with-fallback, or
 * with-thread-status variant).
 */
static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}
52
/*
 * Early one-time init of the unclaimed-MMIO debug state; called before
 * uncore is fully set up. The debug struct lives in i915 so it can be
 * shared, and uncore keeps a pointer to it.
 */
void
intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mmio_debug.lock);
	/* Checking enabled by default; a non-zero value enables the check */
	i915->mmio_debug.unclaimed_mmio_check = 1;

	i915->uncore.debug = &i915->mmio_debug;
}
61
/*
 * Temporarily disable unclaimed-MMIO checking (e.g. while userspace holds
 * a forcewake bypass). Nested: only the first suspend saves and clears the
 * check flag; suspend_count tracks nesting depth.
 */
static void mmio_debug_suspend(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!uncore->debug->suspend_count++) {
		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
		uncore->debug->unclaimed_mmio_check = 0;
	}

	spin_unlock(&uncore->debug->lock);
}
77
static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);

/*
 * Counterpart of mmio_debug_suspend(): when the last suspender resumes,
 * restore the saved check flag and immediately probe once so any invalid
 * access made during the bypass window is reported.
 */
static void mmio_debug_resume(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	if (!--uncore->debug->suspend_count)
		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;

	if (check_for_unclaimed_mmio(uncore))
		drm_info(&uncore->i915->drm,
			 "Invalid mmio detected during user access\n");

	spin_unlock(&uncore->debug->lock);
}
96
/*
 * Human-readable names for each forcewake domain, indexed by
 * enum forcewake_domain_id. Must stay in sync with that enum —
 * enforced by the BUILD_BUG_ON in intel_uncore_forcewake_domain_to_str().
 */
static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
	"gsc",
};
115
116const char *
48c1026a 117intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15 118{
53abb679 119 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
05a2fb15
MK
120
121 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
122 return forcewake_domain_names[id];
123
124 WARN_ON(id);
125
126 return "unknown";
127}
128
/* Read the domain's ack register. */
#define fw_ack(d) readl((d)->reg_ack)
/* Set/clear bits in the domain's request register using masked writes. */
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
05a2fb15 133static inline void
159367bb 134fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
907b28c5 135{
26376a7e
OM
136 /*
137 * We don't really know if the powerwell for the forcewake domain we are
138 * trying to reset here does exist at this point (engines could be fused
139 * off in ICL+), so no waiting for acks
140 */
6509dd11
RS
141 /* WaRsClearFWBitsAtReset */
142 if (GRAPHICS_VER(d->uncore->i915) >= 12)
143 fw_clear(d, 0xefff);
144 else
145 fw_clear(d, 0xffff);
907b28c5
CW
146}
147
/*
 * Take an extra wake reference on the domain and arm its 1 ms auto-release
 * hrtimer (intel_uncore_fw_release_timer) that will drop it again.
 * Caller must hold uncore->lock; a timer must not already be pending
 * for this domain (GEM_BUG_ON).
 */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
159
/*
 * Poll the domain's ack register (atomically, up to
 * FORCEWAKE_ACK_TIMEOUT_MS) until (ack & mask) equals @value.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

/* Wait for the given ack bit(s) to read back as 0. */
static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

/* Wait for the given ack bit(s) to read back as set. */
static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}
182
/*
 * Wait for the kernel forcewake ack to clear before issuing a new request.
 * On timeout, distinguish a dead MMIO BAR (register reads back as all-ones,
 * so wedge the GT) from a plain ack timeout, and taint the kernel either
 * way so CI results are flagged as unreliable.
 */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
		return;

	if (fw_ack(d) == ~0) {
		drm_err(&d->uncore->i915->drm,
			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		intel_gt_set_wedged_async(d->uncore->gt);
	} else {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack to clear.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
	}

	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
907b28c5 202
/* Which final ack state fw_domain_wait_ack_with_fallback() is waiting for. */
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

/*
 * Recovery path for a missing forcewake ack: toggle the fallback bit to
 * kick the hardware state machine, hoping the original ack arrives with
 * the fallback ack. Up to 10 passes with growing udelay between them.
 * Returns 0 if the expected ack state was eventually observed,
 * -ETIMEDOUT otherwise.
 */
static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	drm_dbg(&d->uncore->i915->drm,
		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
		intel_uncore_forcewake_domain_to_str(d->id),
		type == ACK_SET ? "set" : "clear",
		fw_ack(d),
		pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

/*
 * Like fw_domain_wait_ack_clear(), but try the fallback-bit workaround
 * first; only report/wedge via the plain path if that also fails.
 */
static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}
264
/* Request the kernel forcewake bit for this domain (no ack wait here). */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

/*
 * Wait for the hardware to ack the forcewake request; on timeout log an
 * error and taint the kernel so CI results are flagged as unreliable.
 */
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack request.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}
907b28c5 281
/*
 * Like fw_domain_wait_ack_set(), but try the fallback-bit workaround
 * first; only report via the plain path if that also fails.
 */
static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

/* Release the kernel forcewake bit for this domain (fire and forget). */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}
297
/*
 * Acquire a set of forcewake domains: first wait for all previous acks to
 * clear and issue all the requests, then wait for every ack — pipelining
 * the requests rather than handling each domain serially. Caller holds
 * uncore->lock. Updates fw_domains_active bookkeeping.
 */
static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}
316
/*
 * Same flow as fw_domains_get_normal(), but using the fallback-bit ack
 * variants for platforms affected by the missing-ack hardware issue
 * (see fw_domain_wait_ack_with_fallback()).
 */
static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}
907b28c5 336
/*
 * Release a set of forcewake domains (no ack wait needed on release)
 * and clear them from the active bookkeeping. Caller holds uncore->lock.
 */
static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}
907b28c5 350
/*
 * Force-clear the request registers of the given domains, e.g. around
 * suspend/reset, without waiting for acks (the powerwells may not even
 * exist — see fw_domain_reset()).
 */
static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}
366
6ebc9692 367static inline u32 gt_thread_status(struct intel_uncore *uncore)
a5b22b5e
CW
368{
369 u32 val;
370
6cc5ca76 371 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
a5b22b5e
CW
372 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
373
374 return val;
375}
376
/*
 * Spin (up to 5 ms) until the GT thread reports idle/core status 0.
 * Works around a sporadic read returning 0 right after forcewake by
 * waiting for the GT thread to wake up. Warns once on timeout.
 */
static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}
387
/*
 * Forcewake-get variant for snb/ivb/hsw/bdw/vlv: after acquiring the
 * domains, additionally wait for the GT thread (WaRsForcewakeWaitTC0).
 */
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}
396
6ebc9692 397static inline u32 fifo_free_entries(struct intel_uncore *uncore)
c32e3788 398{
6cc5ca76 399 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
c32e3788
DG
400
401 return count & GT_FIFO_FREE_ENTRIES_MASK;
402}
403
/*
 * Ensure at least one free GT FIFO entry is available before a write,
 * waiting (up to GT_FIFO_TIMEOUT_MS) if we are down to the reserved
 * entries. On success one entry is consumed from the cached count.
 */
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	/* Account for the entry our caller is about to consume. */
	uncore->fifo_count = n - 1;
}
427
/*
 * hrtimer callback that drops the auto-acquired forcewake reference taken
 * by fw_domain_arm_timer(). If the domain was used again since the timer
 * was armed (domain->active), restart the timer instead of releasing.
 */
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	/* Recently used — keep the wakeref and rearm for another period. */
	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
453
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
/*
 * Quiesce all forcewake activity: cancel/flush every domain's release
 * timer, drop any still-held domains, and clear the request registers.
 * Returns the set of domains that were active so callers (suspend paths)
 * can restore user-held forcewake afterwards.
 */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			/* Prevent the timer from rearming itself. */
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			/* Timer was pending: run its release work now. */
			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* Drop the lock so concurrently-running timers can finish. */
		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}
514
/*
 * Check FPGA_DBG for an unclaimed-MMIO event; clears the flag and returns
 * true if one was latched. Also detects a completely dead MMIO BAR
 * (register reading back as all-ones) and logs it.
 */
static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR. When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly. Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	/* Write-to-clear the latched event. */
	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}
544
/*
 * VLV/CHV variant: check CLAIM_ER for overflow or a non-zero unclaimed
 * access counter; clears the register and returns true if anything was
 * recorded.
 */
static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}
558
a338908c 559static bool
6ebc9692 560gen6_check_for_fifo_debug(struct intel_uncore *uncore)
a338908c
MK
561{
562 u32 fifodbg;
563
6cc5ca76 564 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
a338908c
MK
565
566 if (unlikely(fifodbg)) {
d0208cfa 567 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
6cc5ca76 568 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
a338908c
MK
569 }
570
571 return fifodbg;
572}
573
/*
 * Run every platform-applicable unclaimed-MMIO detector; returns true if
 * any of them latched an event. Skipped entirely while mmio debugging is
 * suspended (user forcewake bypass). Caller holds uncore->debug->lock.
 */
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}
595
/*
 * Bring forcewake state to a known-good baseline early in init/resume:
 * apply the CHV FIFO workaround, reset all domains, and optionally
 * re-acquire the domains that were held before a suspend
 * (@restore_forcewake, as returned by intel_uncore_forcewake_reset()).
 */
static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	/* Reset requires the PUNIT->PMIC bus — see intel_uncore_forcewake_reset(). */
	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
621
/*
 * System/runtime suspend hook: unregister the PMIC bus-access notifier and
 * quiesce forcewake, remembering which domains were held so
 * intel_uncore_resume_early() can restore them.
 */
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}
633
/*
 * Early resume hook: clear any unclaimed-MMIO state left over from the
 * suspend/resume transition, re-sanitize forcewake (restoring the domains
 * saved by intel_uncore_suspend()), and re-register the PMIC notifier.
 */
void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
649
/*
 * Runtime-resume hook: only the PMIC bus-access notifier needs
 * re-registering; forcewake state survives runtime suspend.
 */
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
657
/*
 * Lock-held core of intel_uncore_forcewake_get(): bump each requested
 * domain's refcount, and only touch hardware for domains going 0 -> 1
 * (already-held domains are just marked active so their auto-release
 * timer keeps them awake).
 */
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}
676
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by symmetric
 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
 *
 * Requires a runtime-pm wakeref to be held by the caller.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* No fw_get_funcs means this platform has no forcewake. */
	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
704
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 *
 * Nested: hardware is only touched on the first user-get.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		mmio_debug_suspend(uncore);
	}
	spin_unlock_irq(&uncore->lock);
}
722
/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 *
 * Nested: hardware is only touched when the last user-get is balanced.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		mmio_debug_resume(uncore);
		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
739
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	/* No fw_get_funcs means this platform has no forcewake. */
	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}
758
/*
 * Lock-held core of the forcewake-put paths: drop each domain's refcount,
 * and for domains going 1 -> 0 either release immediately or, when
 * @delayed, hand the last reference to the auto-release hrtimer.
 */
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			/* Still referenced — just refresh the activity flag. */
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}
dc9fb09c 783
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	/* No fw_get_funcs means this platform has no forcewake. */
	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
804
/*
 * Like intel_uncore_forcewake_put(), but the final reference is released
 * via the 1 ms auto-release timer instead of immediately, keeping the
 * domain awake briefly in case it is needed again.
 */
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
817
/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 *
 * Cancel any pending auto-release timers for @fw_domains and run their
 * release work synchronously.
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		/* Stop the timer from rearming, then run its release now. */
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}
839
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	/* No fw_get_funcs means this platform has no forcewake. */
	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}
858
/*
 * Debug assertion: warn if any forcewake domain is still marked active
 * (used after a full reset, when everything should be released).
 */
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}
868
/*
 * Debug assertion (CONFIG_DRM_I915_DEBUG_RUNTIME_PM only): verify that
 * @fw_domains are currently held, and that each is held by an explicit
 * caller reference rather than only the pending auto-release timer.
 */
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}
909
/*
 * We give fast paths for the really cool registers. The second range includes
 * media domains (and the GSC starting from Xe_LPM+)
 */
/*
 * True for offsets below 0x40000 or at/above 0x116000 — i.e. everything
 * outside the display range, which never needs forcewake.
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= 0x116000; \
})
918
9480dbf0 919static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
91e630b9 920{
91e630b9
TU
921 if (offset < entry->start)
922 return -1;
923 else if (offset > entry->end)
924 return 1;
925 else
926 return 0;
927}
928
9480dbf0
TU
/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
947
9fc1117c 948static enum forcewake_domains
cb7ee690 949find_fw_domain(struct intel_uncore *uncore, u32 offset)
9fc1117c 950{
9480dbf0 951 const struct intel_forcewake_range *entry;
9fc1117c 952
eefac38a
MR
953 if (IS_GSI_REG(offset))
954 offset += uncore->gsi_offset;
955
9480dbf0 956 entry = BSEARCH(offset,
cb7ee690
DCS
957 uncore->fw_domains_table,
958 uncore->fw_domains_table_entries,
91e630b9 959 fw_range_cmp);
38fb6a40 960
99191427
JL
961 if (!entry)
962 return 0;
963
a89a70a8
DCS
964 /*
965 * The list of FW domains depends on the SKU in gen11+ so we
966 * can't determine it statically. We use FORCEWAKE_ALL and
967 * translate it here to the list of available domains.
968 */
969 if (entry->domains == FORCEWAKE_ALL)
cb7ee690 970 return uncore->fw_domains;
a89a70a8 971
a9f236d1
PB
972 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
973 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
974 entry->domains & ~uncore->fw_domains, offset);
99191427
JL
975
976 return entry->domains;
9fc1117c
TU
977}
978
d32e8ed9
MR
979/*
980 * Shadowed register tables describe special register ranges that i915 is
981 * allowed to write to without acquiring forcewake. If these registers' power
982 * wells are down, the hardware will save values written by i915 to a shadow
983 * copy and automatically transfer them into the real register the next time
984 * the power well is woken up. Shadowing only applies to writes; forcewake
985 * must still be acquired when reading from registers in these ranges.
986 *
987 * The documentation for shadowed registers is somewhat spotty on older
988 * platforms. However missing registers from these lists is non-fatal; it just
989 * means we'll wake up the hardware for some register accesses where we didn't
990 * really need to.
991 *
992 * The ranges listed in these tables must be sorted by offset.
993 *
994 * When adding new tables here, please also add them to
995 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
996 * scanned for obvious mistakes or typos by the selftests.
997 */
6863b76c 998
f9d56cd6
MR
999static const struct i915_range gen8_shadowed_regs[] = {
1000 { .start = 0x2030, .end = 0x2030 },
1001 { .start = 0xA008, .end = 0xA00C },
1002 { .start = 0x12030, .end = 0x12030 },
1003 { .start = 0x1a030, .end = 0x1a030 },
1004 { .start = 0x22030, .end = 0x22030 },
6863b76c
TU
1005};
1006
f9d56cd6
MR
1007static const struct i915_range gen11_shadowed_regs[] = {
1008 { .start = 0x2030, .end = 0x2030 },
1009 { .start = 0x2550, .end = 0x2550 },
1010 { .start = 0xA008, .end = 0xA00C },
1011 { .start = 0x22030, .end = 0x22030 },
0bb50de1
MR
1012 { .start = 0x22230, .end = 0x22230 },
1013 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 1014 { .start = 0x1C0030, .end = 0x1C0030 },
0bb50de1
MR
1015 { .start = 0x1C0230, .end = 0x1C0230 },
1016 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 1017 { .start = 0x1C4030, .end = 0x1C4030 },
0bb50de1
MR
1018 { .start = 0x1C4230, .end = 0x1C4230 },
1019 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 1020 { .start = 0x1C8030, .end = 0x1C8030 },
0bb50de1
MR
1021 { .start = 0x1C8230, .end = 0x1C8230 },
1022 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 1023 { .start = 0x1D0030, .end = 0x1D0030 },
0bb50de1
MR
1024 { .start = 0x1D0230, .end = 0x1D0230 },
1025 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 1026 { .start = 0x1D4030, .end = 0x1D4030 },
0bb50de1
MR
1027 { .start = 0x1D4230, .end = 0x1D4230 },
1028 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 1029 { .start = 0x1D8030, .end = 0x1D8030 },
0bb50de1
MR
1030 { .start = 0x1D8230, .end = 0x1D8230 },
1031 { .start = 0x1D8510, .end = 0x1D8550 },
a89a70a8
DCS
1032};
1033
f9d56cd6
MR
1034static const struct i915_range gen12_shadowed_regs[] = {
1035 { .start = 0x2030, .end = 0x2030 },
5798a769 1036 { .start = 0x2510, .end = 0x2550 },
f9d56cd6 1037 { .start = 0xA008, .end = 0xA00C },
5798a769
MR
1038 { .start = 0xA188, .end = 0xA188 },
1039 { .start = 0xA278, .end = 0xA278 },
1040 { .start = 0xA540, .end = 0xA56C },
1041 { .start = 0xC4C8, .end = 0xC4C8 },
1042 { .start = 0xC4D4, .end = 0xC4D4 },
1043 { .start = 0xC600, .end = 0xC600 },
f9d56cd6 1044 { .start = 0x22030, .end = 0x22030 },
5798a769 1045 { .start = 0x22510, .end = 0x22550 },
f9d56cd6 1046 { .start = 0x1C0030, .end = 0x1C0030 },
5798a769 1047 { .start = 0x1C0510, .end = 0x1C0550 },
f9d56cd6 1048 { .start = 0x1C4030, .end = 0x1C4030 },
5798a769 1049 { .start = 0x1C4510, .end = 0x1C4550 },
f9d56cd6 1050 { .start = 0x1C8030, .end = 0x1C8030 },
5798a769 1051 { .start = 0x1C8510, .end = 0x1C8550 },
f9d56cd6 1052 { .start = 0x1D0030, .end = 0x1D0030 },
5798a769 1053 { .start = 0x1D0510, .end = 0x1D0550 },
f9d56cd6 1054 { .start = 0x1D4030, .end = 0x1D4030 },
5798a769 1055 { .start = 0x1D4510, .end = 0x1D4550 },
f9d56cd6 1056 { .start = 0x1D8030, .end = 0x1D8030 },
5798a769 1057 { .start = 0x1D8510, .end = 0x1D8550 },
bfac1e2b 1058
5c5c40e2
MR
1059 /*
1060 * The rest of these ranges are specific to Xe_HP and beyond, but
1061 * are reserved/unused ranges on earlier gen12 platforms, so they can
1062 * be safely added to the gen12 table.
1063 */
f9d56cd6 1064 { .start = 0x1E0030, .end = 0x1E0030 },
5c5c40e2 1065 { .start = 0x1E0510, .end = 0x1E0550 },
f9d56cd6 1066 { .start = 0x1E4030, .end = 0x1E4030 },
5c5c40e2 1067 { .start = 0x1E4510, .end = 0x1E4550 },
f9d56cd6 1068 { .start = 0x1E8030, .end = 0x1E8030 },
5c5c40e2 1069 { .start = 0x1E8510, .end = 0x1E8550 },
f9d56cd6 1070 { .start = 0x1F0030, .end = 0x1F0030 },
5c5c40e2 1071 { .start = 0x1F0510, .end = 0x1F0550 },
f9d56cd6 1072 { .start = 0x1F4030, .end = 0x1F4030 },
5c5c40e2 1073 { .start = 0x1F4510, .end = 0x1F4550 },
f9d56cd6 1074 { .start = 0x1F8030, .end = 0x1F8030 },
5c5c40e2 1075 { .start = 0x1F8510, .end = 0x1F8550 },
bfac1e2b
MR
1076};
1077
c74e66d4
MR
1078static const struct i915_range dg2_shadowed_regs[] = {
1079 { .start = 0x2030, .end = 0x2030 },
1080 { .start = 0x2510, .end = 0x2550 },
1081 { .start = 0xA008, .end = 0xA00C },
1082 { .start = 0xA188, .end = 0xA188 },
1083 { .start = 0xA278, .end = 0xA278 },
1084 { .start = 0xA540, .end = 0xA56C },
1085 { .start = 0xC4C8, .end = 0xC4C8 },
1086 { .start = 0xC4E0, .end = 0xC4E0 },
1087 { .start = 0xC600, .end = 0xC600 },
1088 { .start = 0xC658, .end = 0xC658 },
1089 { .start = 0x22030, .end = 0x22030 },
1090 { .start = 0x22510, .end = 0x22550 },
1091 { .start = 0x1C0030, .end = 0x1C0030 },
1092 { .start = 0x1C0510, .end = 0x1C0550 },
1093 { .start = 0x1C4030, .end = 0x1C4030 },
1094 { .start = 0x1C4510, .end = 0x1C4550 },
1095 { .start = 0x1C8030, .end = 0x1C8030 },
1096 { .start = 0x1C8510, .end = 0x1C8550 },
1097 { .start = 0x1D0030, .end = 0x1D0030 },
1098 { .start = 0x1D0510, .end = 0x1D0550 },
1099 { .start = 0x1D4030, .end = 0x1D4030 },
1100 { .start = 0x1D4510, .end = 0x1D4550 },
1101 { .start = 0x1D8030, .end = 0x1D8030 },
1102 { .start = 0x1D8510, .end = 0x1D8550 },
1103 { .start = 0x1E0030, .end = 0x1E0030 },
1104 { .start = 0x1E0510, .end = 0x1E0550 },
1105 { .start = 0x1E4030, .end = 0x1E4030 },
1106 { .start = 0x1E4510, .end = 0x1E4550 },
1107 { .start = 0x1E8030, .end = 0x1E8030 },
1108 { .start = 0x1E8510, .end = 0x1E8550 },
1109 { .start = 0x1F0030, .end = 0x1F0030 },
1110 { .start = 0x1F0510, .end = 0x1F0550 },
1111 { .start = 0x1F4030, .end = 0x1F4030 },
1112 { .start = 0x1F4510, .end = 0x1F4550 },
1113 { .start = 0x1F8030, .end = 0x1F8030 },
1114 { .start = 0x1F8510, .end = 0x1F8550 },
cf82d9dd
MT
1115};
1116
14f2f9bf
MR
1117static const struct i915_range mtl_shadowed_regs[] = {
1118 { .start = 0x2030, .end = 0x2030 },
1119 { .start = 0x2510, .end = 0x2550 },
1120 { .start = 0xA008, .end = 0xA00C },
1121 { .start = 0xA188, .end = 0xA188 },
1122 { .start = 0xA278, .end = 0xA278 },
1123 { .start = 0xA540, .end = 0xA56C },
1124 { .start = 0xC050, .end = 0xC050 },
1125 { .start = 0xC340, .end = 0xC340 },
1126 { .start = 0xC4C8, .end = 0xC4C8 },
1127 { .start = 0xC4E0, .end = 0xC4E0 },
1128 { .start = 0xC600, .end = 0xC600 },
1129 { .start = 0xC658, .end = 0xC658 },
1130 { .start = 0xCFD4, .end = 0xCFDC },
1131 { .start = 0x22030, .end = 0x22030 },
1132 { .start = 0x22510, .end = 0x22550 },
1133};
1134
1135static const struct i915_range xelpmp_shadowed_regs[] = {
1136 { .start = 0x1C0030, .end = 0x1C0030 },
1137 { .start = 0x1C0510, .end = 0x1C0550 },
1138 { .start = 0x1C8030, .end = 0x1C8030 },
1139 { .start = 0x1C8510, .end = 0x1C8550 },
1140 { .start = 0x1D0030, .end = 0x1D0030 },
1141 { .start = 0x1D0510, .end = 0x1D0550 },
1142 { .start = 0x38A008, .end = 0x38A00C },
1143 { .start = 0x38A188, .end = 0x38A188 },
1144 { .start = 0x38A278, .end = 0x38A278 },
1145 { .start = 0x38A540, .end = 0x38A56C },
1146 { .start = 0x38A618, .end = 0x38A618 },
1147 { .start = 0x38C050, .end = 0x38C050 },
1148 { .start = 0x38C340, .end = 0x38C340 },
1149 { .start = 0x38C4C8, .end = 0x38C4C8 },
1150 { .start = 0x38C4E0, .end = 0x38C4E4 },
1151 { .start = 0x38C600, .end = 0x38C600 },
1152 { .start = 0x38C658, .end = 0x38C658 },
1153 { .start = 0x38CFD4, .end = 0x38CFDC },
1154};
1155
f9d56cd6 1156static int mmio_range_cmp(u32 key, const struct i915_range *range)
5a659383 1157{
f9d56cd6 1158 if (key < range->start)
5a659383 1159 return -1;
f9d56cd6 1160 else if (key > range->end)
5a659383
TU
1161 return 1;
1162 else
1163 return 0;
1164}
1165
6cdbb101
MR
1166static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1167{
1168 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1169 return false;
6863b76c 1170
eefac38a
MR
1171 if (IS_GSI_REG(offset))
1172 offset += uncore->gsi_offset;
1173
6cdbb101
MR
1174 return BSEARCH(offset,
1175 uncore->shadowed_reg_table,
1176 uncore->shadowed_reg_table_entries,
1177 mmio_range_cmp);
1178}
a89a70a8 1179
ccb2acea
DCS
1180static enum forcewake_domains
1181gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1182{
1183 return FORCEWAKE_RENDER;
1184}
1185
d32e8ed9
MR
#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
1202
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *    ...
 *    0x1000 - 0x1fff:  GT
 *    0x2000 - 0x2cff:  GT
 *    0x2d00 - 0x2fff:  unused/reserved
 *    0x3000 - 0xffff:  GT
 *    ...
 *
 * these could all be represented by a single line in the code:
 *
 *       GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

1ab2b4cd
MR
1236static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1237 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1238};
6863b76c 1239
d32e8ed9
MR
1240static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1241 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1242 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1243 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1244 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1245 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1246 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1247 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1248};
1249
9fc1117c
TU
1250static const struct intel_forcewake_range __chv_fw_ranges[] = {
1251 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
b0081239 1252 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1253 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
b0081239 1254 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1255 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
b0081239 1256 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c 1257 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
b0081239
TU
1258 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1259 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
9fc1117c 1260 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
b0081239
TU
1261 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1262 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
9fc1117c
TU
1263 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1264 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1265 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1266 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
9fc1117c 1267};
38fb6a40 1268
9fc1117c 1269static const struct intel_forcewake_range __gen9_fw_ranges[] = {
55e3c170 1270 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
9fc1117c
TU
1271 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1272 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1273 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
9fc1117c 1274 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1275 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
9fc1117c 1276 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1277 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
b0081239 1278 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
9fc1117c 1279 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1280 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
9fc1117c 1281 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1282 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
b0081239 1283 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
55e3c170 1284 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
9fc1117c 1285 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1286 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
9fc1117c 1287 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
55e3c170 1288 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
b0081239 1289 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1290 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
9fc1117c 1291 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
55e3c170 1292 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
b0081239 1293 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1294 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
9fc1117c 1295 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
55e3c170 1296 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
9fc1117c 1297 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
55e3c170 1298 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
b0081239 1299 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
55e3c170 1300 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
9fc1117c
TU
1301 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1302};
6863b76c 1303
a89a70a8 1304static const struct intel_forcewake_range __gen11_fw_ranges[] = {
c4310def 1305 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
a89a70a8 1306 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
55e3c170 1307 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
a89a70a8 1308 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
55e3c170 1309 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
a89a70a8 1310 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
55e3c170 1311 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
a89a70a8 1312 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
55e3c170 1313 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
a89a70a8 1314 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
55e3c170 1315 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
c4310def 1316 GEN_FW_RANGE(0x8800, 0x8bff, 0),
a89a70a8 1317 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
55e3c170 1318 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
c4310def
RS
1319 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1320 GEN_FW_RANGE(0x9560, 0x95ff, 0),
55e3c170 1321 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
a89a70a8 1322 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
55e3c170 1323 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
c9f8d187 1324 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
55e3c170 1325 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
c9f8d187 1326 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
55e3c170 1327 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
c4310def 1328 GEN_FW_RANGE(0x24000, 0x2407f, 0),
55e3c170 1329 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
c4310def 1330 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
55e3c170 1331 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
c4310def 1332 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
55e3c170 1333 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
a89a70a8
DCS
1334 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1335 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
c4310def
RS
1336 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1337 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
a89a70a8 1338 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
c4310def 1339 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
a89a70a8
DCS
1340};
1341
cf82d9dd 1342static const struct intel_forcewake_range __gen12_fw_ranges[] = {
92f5df0d
MR
1343 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1344 0x0 - 0xaff: reserved
1345 0xb00 - 0x1fff: always on */
cf82d9dd 1346 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
92f5df0d
MR
1347 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1348 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1349 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
cf82d9dd 1350 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
92f5df0d
MR
1351 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1352 0x4000 - 0x48ff: gt
1353 0x4900 - 0x51ff: reserved */
1354 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1355 0x5200 - 0x53ff: render
1356 0x5400 - 0x54ff: reserved
1357 0x5500 - 0x7fff: render */
55e3c170 1358 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
cf82d9dd 1359 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
92f5df0d
MR
1360 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1361 0x8160 - 0x817f: reserved
1362 0x8180 - 0x81ff: always on */
1363 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
cf82d9dd 1364 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
92f5df0d
MR
1365 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1366 0x8500 - 0x87ff: gt
1367 0x8800 - 0x8fff: reserved
1368 0x9000 - 0x947f: gt
1369 0x9480 - 0x94cf: reserved */
1370 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1371 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1372 0x9560 - 0x95ff: always on
1373 0x9600 - 0x97ff: reserved */
55e3c170 1374 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
92f5df0d
MR
1375 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1376 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1377 0xb400 - 0xbf7f: gt
1378 0xb480 - 0xbfff: reserved
1379 0xc000 - 0xcfff: gt */
1380 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1381 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1382 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1383 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1384 0xdc00 - 0xddff: render
1385 0xde00 - 0xde7f: reserved
1386 0xde80 - 0xe8ff: render
1387 0xe900 - 0xefff: reserved */
1388 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1389 0xf000 - 0xffff: gt
1390 0x10000 - 0x147ff: reserved */
1391 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1392 0x14800 - 0x14fff: render
1393 0x15000 - 0x16dff: reserved
1394 0x16e00 - 0x1bfff: render
1395 0x1c000 - 0x1ffff: reserved */
1396 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1397 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1398 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1399 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1400 0x24000 - 0x2407f: always on
1401 0x24080 - 0x2417f: reserved */
1402 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1403 0x24180 - 0x241ff: gt
1404 0x24200 - 0x249ff: reserved */
1405 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1406 0x24a00 - 0x24a7f: render
1407 0x24a80 - 0x251ff: reserved */
1408 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1409 0x25200 - 0x252ff: gt
1410 0x25300 - 0x255ff: reserved */
1411 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1412 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1413 0x25680 - 0x256ff: VD2
1414 0x25700 - 0x259ff: reserved */
1415 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1416 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1417 0x25a80 - 0x25aff: VD2
1418 0x25b00 - 0x2ffff: reserved */
1419 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
cf82d9dd 1420 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
92f5df0d
MR
1421 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1422 0x1c0000 - 0x1c2bff: VD0
1423 0x1c2c00 - 0x1c2cff: reserved
1424 0x1c2d00 - 0x1c2dff: VD0
1425 0x1c2e00 - 0x1c3eff: reserved
1426 0x1c3f00 - 0x1c3fff: VD0 */
1427 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1428 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1429 0x1c8000 - 0x1ca0ff: VE0
1430 0x1ca100 - 0x1cbeff: reserved
1431 0x1cbf00 - 0x1cbfff: VE0 */
1432 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1433 0x1cc000 - 0x1ccfff: VD0
1434 0x1cd000 - 0x1cffff: reserved */
1435 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1436 0x1d0000 - 0x1d2bff: VD2
1437 0x1d2c00 - 0x1d2cff: reserved
1438 0x1d2d00 - 0x1d2dff: VD2
1439 0x1d2e00 - 0x1d3eff: reserved
1440 0x1d3f00 - 0x1d3fff: VD2 */
cf82d9dd
MT
1441};
1442
e0531636 1443static const struct intel_forcewake_range __dg2_fw_ranges[] = {
ecab2a6e
LDM
1444 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1445 0x0 - 0xaff: reserved
1446 0xb00 - 0x1fff: always on */
1447 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1448 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1449 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1450 0x4b00 - 0x4fff: reserved
1451 0x5000 - 0x51ff: always on */
1452 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1453 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1454 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1455 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1456 0x8160 - 0x817f: reserved
1457 0x8180 - 0x81ff: always on */
1458 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1459 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1460 GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
1461 0x8500 - 0x87ff: gt
1462 0x8800 - 0x8c7f: reserved
1463 0x8c80 - 0x8cff: gt (DG2 only) */
1464 GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
1465 0x8d00 - 0x8dff: render (DG2 only)
1466 0x8e00 - 0x8fff: reserved */
1467 GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
1468 0x9000 - 0x947f: gt
1469 0x9480 - 0x94cf: reserved */
1470 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1471 GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1472 0x9560 - 0x95ff: always on
1473 0x9600 - 0x967f: reserved */
1474 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1475 0x9680 - 0x96ff: render
1476 0x9700 - 0x97ff: reserved */
1477 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1478 0x9800 - 0xb4ff: gt
1479 0xb500 - 0xbfff: reserved
1480 0xc000 - 0xcfff: gt */
1481 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1482 GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1483 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1484 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1485 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1486 0xdd00 - 0xddff: gt
1487 0xde00 - 0xde7f: reserved */
1488 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1489 0xde80 - 0xdfff: render
1490 0xe000 - 0xe0ff: reserved
1491 0xe100 - 0xe8ff: render */
1492 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1493 0xe900 - 0xe9ff: gt
1494 0xea00 - 0xefff: reserved
1495 0xf000 - 0xffff: gt */
1496 GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
1497 0x10000 - 0x11fff: reserved
1498 0x12000 - 0x127ff: always on
1499 0x12800 - 0x12fff: reserved */
1500 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
1501 GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
1502 0x13200 - 0x133ff: VD2 (DG2 only)
1503 0x13400 - 0x147ff: reserved */
1504 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
1505 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
1506 0x15000 - 0x15fff: gt (DG2 only)
1507 0x16000 - 0x16dff: reserved */
1508 GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
1509 0x16e00 - 0x1ffff: render
1510 0x20000 - 0x21fff: reserved */
1511 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1512 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1513 0x24000 - 0x2407f: always on
1514 0x24080 - 0x2417f: reserved */
1515 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1516 0x24180 - 0x241ff: gt
1517 0x24200 - 0x249ff: reserved */
1518 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1519 0x24a00 - 0x24a7f: render
1520 0x24a80 - 0x251ff: reserved */
1521 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1522 0x25200 - 0x252ff: gt
1523 0x25300 - 0x25fff: reserved */
1524 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1525 0x26000 - 0x27fff: render
1526 0x28000 - 0x29fff: reserved
1527 0x2a000 - 0x2ffff: undocumented */
1528 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1529 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1530 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1531 0x1c0000 - 0x1c2bff: VD0
1532 0x1c2c00 - 0x1c2cff: reserved
1533 0x1c2d00 - 0x1c2dff: VD0
1534 0x1c2e00 - 0x1c3eff: VD0
1535 0x1c3f00 - 0x1c3fff: VD0 */
1536 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1537 0x1c4000 - 0x1c6bff: VD1
1538 0x1c6c00 - 0x1c6cff: reserved
1539 0x1c6d00 - 0x1c6dff: VD1
1540 0x1c6e00 - 0x1c7fff: reserved */
1541 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1542 0x1c8000 - 0x1ca0ff: VE0
1543 0x1ca100 - 0x1cbfff: reserved */
1544 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1545 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1546 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1547 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1548 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1549 0x1d0000 - 0x1d2bff: VD2
1550 0x1d2c00 - 0x1d2cff: reserved
1551 0x1d2d00 - 0x1d2dff: VD2
1552 0x1d2e00 - 0x1d3dff: VD2
1553 0x1d3e00 - 0x1d3eff: reserved
1554 0x1d3f00 - 0x1d3fff: VD2 */
1555 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1556 0x1d4000 - 0x1d6bff: VD3
1557 0x1d6c00 - 0x1d6cff: reserved
1558 0x1d6d00 - 0x1d6dff: VD3
1559 0x1d6e00 - 0x1d7fff: reserved */
1560 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1561 0x1d8000 - 0x1da0ff: VE1
1562 0x1da100 - 0x1dffff: reserved */
1563 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1564 0x1e0000 - 0x1e2bff: VD4
1565 0x1e2c00 - 0x1e2cff: reserved
1566 0x1e2d00 - 0x1e2dff: VD4
1567 0x1e2e00 - 0x1e3eff: reserved
1568 0x1e3f00 - 0x1e3fff: VD4 */
1569 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1570 0x1e4000 - 0x1e6bff: VD5
1571 0x1e6c00 - 0x1e6cff: reserved
1572 0x1e6d00 - 0x1e6dff: VD5
1573 0x1e6e00 - 0x1e7fff: reserved */
1574 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1575 0x1e8000 - 0x1ea0ff: VE2
1576 0x1ea100 - 0x1effff: reserved */
1577 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1578 0x1f0000 - 0x1f2bff: VD6
1579 0x1f2c00 - 0x1f2cff: reserved
1580 0x1f2d00 - 0x1f2dff: VD6
1581 0x1f2e00 - 0x1f3eff: reserved
1582 0x1f3f00 - 0x1f3fff: VD6 */
1583 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1584 0x1f4000 - 0x1f6bff: VD7
1585 0x1f6c00 - 0x1f6cff: reserved
1586 0x1f6d00 - 0x1f6dff: VD7
1587 0x1f6e00 - 0x1f7fff: reserved */
1588 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
bfac1e2b
MR
1589};
1590
14f2f9bf
MR
1591static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1592 GEN_FW_RANGE(0x0, 0xaff, 0),
1593 GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1594 GEN_FW_RANGE(0xc00, 0xfff, 0),
1595 GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1596 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1597 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1598 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1599 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1600 0x4000 - 0x48ff: render
1601 0x4900 - 0x51ff: reserved */
1602 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1603 0x5200 - 0x53ff: render
1604 0x5400 - 0x54ff: reserved
1605 0x5500 - 0x7fff: render */
1606 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1607 GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1608 0x8140 - 0x815f: render
1609 0x8160 - 0x817f: reserved */
1610 GEN_FW_RANGE(0x8180, 0x81ff, 0),
1611 GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1612 0x8200 - 0x87ff: gt
1613 0x8800 - 0x8dff: reserved
1614 0x8e00 - 0x8f7f: gt
1615 0x8f80 - 0x8fff: reserved
1616 0x9000 - 0x947f: gt
1617 0x9480 - 0x94cf: reserved */
1618 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1619 GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1620 0x9560 - 0x95ff: always on
1621 0x9600 - 0x967f: reserved */
1622 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1623 0x9680 - 0x96ff: render
1624 0x9700 - 0x97ff: reserved */
1625 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1626 0x9800 - 0xb4ff: gt
1627 0xb500 - 0xbfff: reserved
1628 0xc000 - 0xcfff: gt */
1629 GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1630 0xd000 - 0xd3ff: always on
1631 0xd400 - 0xd7ff: reserved */
1632 GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1633 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1634 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1635 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1636 0xdd00 - 0xddff: gt
1637 0xde00 - 0xde7f: reserved */
1638 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1639 0xde80 - 0xdfff: render
1640 0xe000 - 0xe0ff: reserved
1641 0xe100 - 0xe8ff: render */
1642 GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1643 GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1644 0xea00 - 0x11fff: reserved
1645 0x12000 - 0x127ff: always on
1646 0x12800 - 0x147ff: reserved */
1647 GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1648 0x14800 - 0x153ff: gt
1649 0x15400 - 0x19fff: reserved */
1650 GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1651 0x1a000 - 0x1bfff: render
1652 0x1c000 - 0x21fff: reserved */
1653 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1654 GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1655 0x24000 - 0x2407f: always on
1656 0x24080 - 0x2ffff: reserved */
6d46d09a
VB
1657 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1658 GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1659 GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1660 /* FIXME: WA to wake GT while triggering H2G */
14f2f9bf
MR
1661};
1662
1663/*
1664 * Note that the register ranges here are the final offsets after
1665 * translation of the GSI block to the 0x380000 offset.
1666 *
1667 * NOTE: There are a couple MCR ranges near the bottom of this table
1668 * that need to power up either VD0 or VD2 depending on which replicated
1669 * instance of the register we're trying to access. Our forcewake logic
1670 * at the moment doesn't have a good way to take steering into consideration,
1671 * and the driver doesn't even access any registers in those ranges today,
1672 * so for now we just mark those ranges as FORCEWAKE_ALL. That will ensure
1673 * proper operation if we do start using the ranges in the future, and we
1674 * can determine at that time whether it's worth adding extra complexity to
1675 * the forcewake handling to take steering into consideration.
1676 */
1677static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1678	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1679	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1680		0x116000 - 0x117fff: gsc
1681		0x118000 - 0x119fff: reserved
1682		0x11a000 - 0x11efff: gsc
1683		0x11f000 - 0x11ffff: reserved */
1684	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1685	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1686		0x1c0000 - 0x1c3dff: VD0
1687		0x1c3e00 - 0x1c3eff: reserved
1688		0x1c3f00 - 0x1c3fff: VD0
1689		0x1c4000 - 0x1c7fff: reserved */
1690	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1691		0x1c8000 - 0x1ca0ff: VE0
1692		0x1ca100 - 0x1cbfff: reserved */
1693	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1694		0x1cc000 - 0x1cdfff: VD0
1695		0x1ce000 - 0x1cffff: reserved */
1696	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1697		0x1d0000 - 0x1d3dff: VD2
1698		0x1d3e00 - 0x1d3eff: reserved
1699		0x1d4000 - 0x1d7fff: VD2 */
1700	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1701	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1702		0x1da100 - 0x23ffff: reserved
1703		0x240000 - 0x37ffff: non-GT range
1704		0x380000 - 0x380aff: reserved */
1705	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1706	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1707	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1708		0x381000 - 0x381fff: gt
1709		0x382000 - 0x383fff: reserved
1710		0x384000 - 0x384aff: gt
1711		0x384b00 - 0x3851ff: reserved
1712		0x385200 - 0x3871ff: gt
1713		0x387200 - 0x387fff: reserved
1714		0x388000 - 0x38813f: gt
1715		0x388140 - 0x38817f: reserved */
1716	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1717		0x388180 - 0x3881ff: always on
1718		0x388200 - 0x3882ff: reserved */
1719	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1720		0x388300 - 0x38887f: gt
1721		0x388880 - 0x388fff: reserved
1722		0x389000 - 0x38947f: gt
1723		0x389480 - 0x38955f: reserved */
1724	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1725		0x389560 - 0x3895ff: always on
1726		0x389600 - 0x389fff: reserved */
1727	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1728		0x38a000 - 0x38afff: gt
1729		0x38b000 - 0x38bfff: reserved
1730		0x38c000 - 0x38cfff: gt */
1731	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1732	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1733		0x38d120 - 0x38dfff: gt
1734		0x38e000 - 0x38efff: reserved
1735		0x38f000 - 0x38ffff: gt
1736		0x390000 - 0x391fff: reserved */
1737	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1738		0x392000 - 0x3927ff: always on
1739		0x392800 - 0x392fff: reserved */
1740	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1741	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1742	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1743	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1744	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1745		0x393500 - 0x393bff: reserved
1746		0x393c00 - 0x393c7f: always on */
1747	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1748};
1749
/* Wake the chip out of RC6 with a harmless MMIO write before real access. */
907b28c5 1750static void
6ebc9692 1751ilk_dummy_write(struct intel_uncore *uncore)
907b28c5
CW
1752{
1753	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1754	 * the chip from rc6 before touching it for real. MI_MODE is masked,
1755	 * hence harmless to write 0 into. */
ab9e00a3 1756	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
907b28c5
CW
1757}
1758
/*
 * Warn if the just-completed MMIO access left an unclaimed-access flag set.
 * Each warning decrements the i915.mmio_debug modparam so only the first N
 * failures are reported.
 */
1759static void
2cf7bf6f 1760__unclaimed_reg_debug(struct intel_uncore *uncore,
9c053501 1761		      const i915_reg_t reg,
4b276ed3 1762		      const bool read)
907b28c5 1763{
a9f236d1 1764	if (drm_WARN(&uncore->i915->drm,
4b276ed3 1765		     check_for_unclaimed_mmio(uncore),
a9f236d1
PB
1766		     "Unclaimed %s register 0x%x\n",
1767		     read ? "read from" : "write to",
1768		     i915_mmio_reg_offset(reg)))
4f044a88 1769		/* Only report the first N failures */
8a25c4be 1770		uncore->i915->params.mmio_debug--;
907b28c5
CW
1771}
1772
4b276ed3
LDM
/*
 * Debug-log (not warn) if an unclaimed access was already pending *before*
 * the current access — the culprit is some earlier, unchecked access.
 */
1773static void
1774__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1775			       const i915_reg_t reg,
1776			       const bool read)
1777{
618f5df1
LDM
1778	if (check_for_unclaimed_mmio(uncore))
1779		drm_dbg(&uncore->i915->drm,
1780			"Unclaimed access detected before %s register 0x%x\n",
1781			read ? "read from" : "write to",
1782			i915_mmio_reg_offset(reg));
4b276ed3
LDM
1783}
1784
/*
 * Begin an unclaimed-mmio debug bracket around a register access.
 * Returns true (and leaves uncore->debug->lock held) iff debugging is
 * active; the caller must then pair this with unclaimed_reg_debug_footer().
 */
d823445b 1785static inline bool __must_check
7afe2340
JN
1786unclaimed_reg_debug_header(struct intel_uncore *uncore,
1787			   const i915_reg_t reg, const bool read)
9c053501 1788{
639e30ee 1789	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
d823445b 1790		return false;
9c053501 1791
0a9b2630
DCS
1792	/* interrupts are disabled and re-enabled around uncore->lock usage */
1793	lockdep_assert_held(&uncore->lock);
1794
7afe2340
JN
1795	spin_lock(&uncore->debug->lock);
1796	__unclaimed_previous_reg_debug(uncore, reg, read);
d823445b
JN
1797
1798	return true;
7afe2340
JN
1799}
1800
/*
 * Close the debug bracket opened by unclaimed_reg_debug_header(): check for
 * an unclaimed flag raised by this access, then drop uncore->debug->lock.
 */
1801static inline void
1802unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1803			   const i915_reg_t reg, const bool read)
1804{
7afe2340
JN
1805	/* interrupts are disabled and re-enabled around uncore->lock usage */
1806	lockdep_assert_held(&uncore->lock);
1807
1808	__unclaimed_reg_debug(uncore, reg, read);
1809	spin_unlock(&uncore->debug->lock);
9c053501
MK
1810}
1811
0e65ce24
CW
/*
 * Raw read accessor generators (no forcewake, no locking):
 *  - vgpu_read*: virtualized GPU — hypervisor handles power, just trace.
 *  - gen2_read*: pre-gen5 — plain MMIO read with RPM assert and trace.
 *  - gen5_read*: ILK — same as gen2 plus the RC6 dummy-write workaround.
 */
1812#define __vgpu_read(x) \
1813static u##x \
1814vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1815	u##x val = __raw_uncore_read##x(uncore, reg); \
1816	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1817	return val; \
1818}
1819__vgpu_read(8)
1820__vgpu_read(16)
1821__vgpu_read(32)
1822__vgpu_read(64)
1823
51f67885 1824#define GEN2_READ_HEADER(x) \
5d738795 1825	u##x val = 0; \
87b391b9 1826	assert_rpm_wakelock_held(uncore->rpm);
5d738795 1827
51f67885 1828#define GEN2_READ_FOOTER \
5d738795
BW
1829	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1830	return val
1831
51f67885 1832#define __gen2_read(x) \
0b274481 1833static u##x \
a2b4abfc 1834gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
51f67885 1835	GEN2_READ_HEADER(x); \
6cc5ca76 1836	val = __raw_uncore_read##x(uncore, reg); \
51f67885 1837	GEN2_READ_FOOTER; \
3967018e
BW
1838}
1839
1840#define __gen5_read(x) \
1841static u##x \
a2b4abfc 1842gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
51f67885 1843	GEN2_READ_HEADER(x); \
6ebc9692 1844	ilk_dummy_write(uncore); \
6cc5ca76 1845	val = __raw_uncore_read##x(uncore, reg); \
51f67885 1846	GEN2_READ_FOOTER; \
3967018e
BW
1847}
1848
51f67885
CW
1849__gen5_read(8)
1850__gen5_read(16)
1851__gen5_read(32)
1852__gen5_read(64)
1853__gen2_read(8)
1854__gen2_read(16)
1855__gen2_read(32)
1856__gen2_read(64)
1857
1858#undef __gen5_read
1859#undef __gen2_read
1860
1861#undef GEN2_READ_FOOTER
1862#undef GEN2_READ_HEADER
1863
1863
/*
 * Gen6+ read prologue/epilogue: take uncore->lock (irqsave) for the whole
 * access, bracket it with the unclaimed-mmio debug checks, and trace.
 */
1864#define GEN6_READ_HEADER(x) \
f0f59a00 1865	u32 offset = i915_mmio_reg_offset(reg); \
51f67885 1866	unsigned long irqflags; \
d823445b 1867	bool unclaimed_reg_debug; \
51f67885 1868	u##x val = 0; \
87b391b9 1869	assert_rpm_wakelock_held(uncore->rpm); \
272c7e52 1870	spin_lock_irqsave(&uncore->lock, irqflags); \
d823445b 1871	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
51f67885
CW
1872
1873#define GEN6_READ_FOOTER \
d823445b
JN
1874	if (unclaimed_reg_debug) \
1875		unclaimed_reg_debug_footer(uncore, reg, true); \
272c7e52 1876	spin_unlock_irqrestore(&uncore->lock, irqflags); \
51f67885
CW
1877	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1878	return val
1879
1879
/*
 * Slow path of __force_wake_auto(): arm the auto-release timer on each
 * requested domain, then actually grab the forcewake references.
 * Caller holds uncore->lock.
 */
f568eeee 1880static noinline void ___force_wake_auto(struct intel_uncore *uncore,
c521b0c8 1881					enum forcewake_domains fw_domains)
b2cff0db
CW
1882{
1883	struct intel_uncore_forcewake_domain *domain;
d2dc94bc
CW
1884	unsigned int tmp;
1885
f568eeee 1886	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
b2cff0db 1887
f568eeee 1888	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
c521b0c8
TU
1889		fw_domain_arm_timer(domain);
1890
5716c8c6 1891	fw_domains_get(uncore, fw_domains);
c521b0c8
TU
1892}
1893
/*
 * Power up any requested forcewake domains that are supported but not
 * already active; no-op if everything needed is already awake.
 */
f568eeee 1894static inline void __force_wake_auto(struct intel_uncore *uncore,
c521b0c8
TU
1895				     enum forcewake_domains fw_domains)
1896{
77adbd8f 1897	GEM_BUG_ON(!fw_domains);
b2cff0db 1898
003342a5 1899	/* Turn on all requested but inactive supported forcewake domains. */
f568eeee
DCS
1900	fw_domains &= uncore->fw_domains;
1901	fw_domains &= ~uncore->fw_domains_active;
b2cff0db 1902
c521b0c8 1903	if (fw_domains)
f568eeee 1904		___force_wake_auto(uncore, fw_domains);
b2cff0db
CW
1905}
1906
/*
 * Table-driven read accessors: look up the required forcewake domains for
 * this offset in uncore->fw_domains_table, wake them if needed, then read.
 */
e5b32ae3 1907#define __gen_fwtable_read(x) \
3967018e 1908static u##x \
e5b32ae3
MR
1909fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1910{ \
6863b76c 1911	enum forcewake_domains fw_engine; \
51f67885 1912	GEN6_READ_HEADER(x); \
e5b32ae3 1913	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
6a42d0f4 1914	if (fw_engine) \
272c7e52 1915		__force_wake_auto(uncore, fw_engine); \
6cc5ca76 1916	val = __raw_uncore_read##x(uncore, reg); \
51f67885 1917	GEN6_READ_FOOTER; \
940aece4 1918}
ccb2acea 1919
e5b32ae3
MR
1920static enum forcewake_domains
1921fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1922	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1923}
ccb2acea 1924
e5b32ae3
MR
1925__gen_fwtable_read(8)
1926__gen_fwtable_read(16)
1927__gen_fwtable_read(32)
1928__gen_fwtable_read(64)
ccb2acea 1929
e5b32ae3 1930#undef __gen_fwtable_read
51f67885
CW
1931#undef GEN6_READ_FOOTER
1932#undef GEN6_READ_HEADER
5d738795 1933
/*
 * Raw write accessor generators for pre-gen6 hardware (no forcewake):
 * gen2 is a plain traced write; gen5 adds the ILK RC6 dummy-write first.
 */
51f67885 1934#define GEN2_WRITE_HEADER \
5d738795 1935	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
87b391b9 1936	assert_rpm_wakelock_held(uncore->rpm); \
907b28c5 1937
51f67885 1938#define GEN2_WRITE_FOOTER
0d965301 1939
51f67885 1940#define __gen2_write(x) \
0b274481 1941static void \
a2b4abfc 1942gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1943	GEN2_WRITE_HEADER; \
6cc5ca76 1944	__raw_uncore_write##x(uncore, reg, val); \
51f67885 1945	GEN2_WRITE_FOOTER; \
4032ef43
BW
1946}
1947
1948#define __gen5_write(x) \
1949static void \
a2b4abfc 1950gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1951	GEN2_WRITE_HEADER; \
6ebc9692 1952	ilk_dummy_write(uncore); \
6cc5ca76 1953	__raw_uncore_write##x(uncore, reg, val); \
51f67885 1954	GEN2_WRITE_FOOTER; \
4032ef43
BW
1955}
1956
51f67885
CW
1957__gen5_write(8)
1958__gen5_write(16)
1959__gen5_write(32)
51f67885
CW
1960__gen2_write(8)
1961__gen2_write(16)
1962__gen2_write(32)
51f67885
CW
1963
1964#undef __gen5_write
1965#undef __gen2_write
1966
1967#undef GEN2_WRITE_FOOTER
1968#undef GEN2_WRITE_HEADER
1969
1969
/*
 * Gen6+ write prologue/epilogue (locked, debug-bracketed, traced), plus the
 * gen6/7 accessors which also wait for GT FIFO space before posting writes.
 */
1970#define GEN6_WRITE_HEADER \
f0f59a00 1971	u32 offset = i915_mmio_reg_offset(reg); \
51f67885 1972	unsigned long irqflags; \
d823445b 1973	bool unclaimed_reg_debug; \
51f67885 1974	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
87b391b9 1975	assert_rpm_wakelock_held(uncore->rpm); \
272c7e52 1976	spin_lock_irqsave(&uncore->lock, irqflags); \
d823445b 1977	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
51f67885
CW
1978
1979#define GEN6_WRITE_FOOTER \
d823445b
JN
1980	if (unclaimed_reg_debug) \
1981		unclaimed_reg_debug_footer(uncore, reg, false); \
272c7e52 1982	spin_unlock_irqrestore(&uncore->lock, irqflags)
51f67885 1983
4032ef43
BW
1984#define __gen6_write(x) \
1985static void \
a2b4abfc 1986gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
51f67885 1987	GEN6_WRITE_HEADER; \
a338908c 1988	if (NEEDS_FORCE_WAKE(offset)) \
6ebc9692 1989		__gen6_gt_wait_for_fifo(uncore); \
6cc5ca76 1990	__raw_uncore_write##x(uncore, reg, val); \
51f67885 1991	GEN6_WRITE_FOOTER; \
4032ef43 1992}
ccb2acea
DCS
1993__gen6_write(8)
1994__gen6_write(16)
1995__gen6_write(32)
4032ef43 1996
/*
 * Table-driven write accessors: wake the forcewake domains that the write
 * table maps this offset to (shadowed registers need none), then write.
 */
aef02736 1997#define __gen_fwtable_write(x) \
ab2aa47e 1998static void \
aef02736 1999fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
6863b76c 2000	enum forcewake_domains fw_engine; \
51f67885 2001	GEN6_WRITE_HEADER; \
aef02736 2002	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
6a42d0f4 2003	if (fw_engine) \
272c7e52 2004		__force_wake_auto(uncore, fw_engine); \
6cc5ca76 2005	__raw_uncore_write##x(uncore, reg, val); \
51f67885 2006	GEN6_WRITE_FOOTER; \
1938e59a 2007}
4032ef43 2008
aef02736
MR
2009static enum forcewake_domains
2010fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2011{
2012	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2013}
ccb2acea 2014
aef02736
MR
2015__gen_fwtable_write(8)
2016__gen_fwtable_write(16)
2017__gen_fwtable_write(32)
ccb2acea 2018
aef02736 2019#undef __gen_fwtable_write
51f67885
CW
2020#undef GEN6_WRITE_FOOTER
2021#undef GEN6_WRITE_HEADER
907b28c5 2022
0e65ce24
CW
/* Virtualized-GPU write accessors: trace + raw write, no forcewake/locking. */
2023#define __vgpu_write(x) \
2024static void \
2025vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2026	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2027	__raw_uncore_write##x(uncore, reg, val); \
2028}
2029__vgpu_write(8)
2030__vgpu_write(16)
2031__vgpu_write(32)
2032
/*
 * Helpers to wire a generated accessor family (gen2/gen5/gen6/fwtable/vgpu)
 * into uncore->funcs; the non-RAW variants also hook the fw-domain lookup.
 */
ccb2acea 2033#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
43d942a7 2034do { \
f7de5027
DCS
2035	(uncore)->funcs.mmio_writeb = x##_write8; \
2036	(uncore)->funcs.mmio_writew = x##_write16; \
2037	(uncore)->funcs.mmio_writel = x##_write32; \
43d942a7
YZ
2038} while (0)
2039
ccb2acea 2040#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
43d942a7 2041do { \
f7de5027
DCS
2042	(uncore)->funcs.mmio_readb = x##_read8; \
2043	(uncore)->funcs.mmio_readw = x##_read16; \
2044	(uncore)->funcs.mmio_readl = x##_read32; \
2045	(uncore)->funcs.mmio_readq = x##_read64; \
43d942a7
YZ
2046} while (0)
2047
ccb2acea
DCS
2048#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2049do { \
2050	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2051	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2052} while (0)
2053
2054#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2055do { \
2056	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2057	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2058} while (0)
05a2fb15 2059
f833cdb0
DCS
/*
 * Allocate and register one forcewake domain: record its set/ack MMIO
 * addresses (offset by any GSI remap), arm its auto-release hrtimer, and
 * publish it in uncore->fw_domain[]/fw_domains. Returns 0 or -ENOMEM.
 */
2060static int __fw_domain_init(struct intel_uncore *uncore,
2061			    enum forcewake_domain_id domain_id,
2062			    i915_reg_t reg_set,
2063			    i915_reg_t reg_ack)
05a2fb15
MK
2064{
2065	struct intel_uncore_forcewake_domain *d;
2066
f833cdb0
DCS
2067	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2068	GEM_BUG_ON(uncore->fw_domain[domain_id]);
05a2fb15 2069
50d84418 2070	if (i915_inject_probe_failure(uncore->i915))
f833cdb0 2071		return -ENOMEM;
05a2fb15 2072
f833cdb0
DCS
2073	d = kzalloc(sizeof(*d), GFP_KERNEL);
2074	if (!d)
2075		return -ENOMEM;
05a2fb15 2076
a9f236d1
PB
2077	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2078	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
6e3955a5 2079
f833cdb0 2080	d->uncore = uncore;
05a2fb15 2081	d->wake_count = 0;
eefac38a
MR
2082	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2083	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
05a2fb15 2084
05a2fb15
MK
2085	d->id = domain_id;
2086
	/* The FORCEWAKE_* bit masks must stay in lockstep with the domain ids. */
33c582c1 2087	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
55e3c170 2088	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
33c582c1 2089	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
a89a70a8
DCS
2090	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2091	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2092	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2093	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
bfac1e2b
MR
2094	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2095	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2096	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2097	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
a89a70a8
DCS
2098	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2099	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
bfac1e2b
MR
2100	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2101	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
14f2f9bf 2102	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
a89a70a8 2103
d2dc94bc 2104	d->mask = BIT(domain_id);
33c582c1 2105
f97e1d78 2106	hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
05a2fb15 2107
535d8d27 2108	uncore->fw_domains |= BIT(domain_id);
f9b3927a 2109
159367bb 2110	fw_domain_reset(d);
f833cdb0
DCS
2111
2112	uncore->fw_domain[domain_id] = d;
2113
2114	return 0;
05a2fb15
MK
2115}
2116
/*
 * Tear down one forcewake domain: unpublish it, verify it holds no wake
 * refs and its release timer is idle, then free it. Safe if never inited.
 */
f7de5027 2117static void fw_domain_fini(struct intel_uncore *uncore,
26376a7e
OM
2118			   enum forcewake_domain_id domain_id)
2119{
2120	struct intel_uncore_forcewake_domain *d;
2121
f833cdb0 2122	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
26376a7e 2123
f833cdb0
DCS
2124	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2125	if (!d)
2126		return;
26376a7e 2127
f833cdb0 2128	uncore->fw_domains &= ~BIT(domain_id);
a9f236d1
PB
2129	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2130	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
f833cdb0
DCS
2131	kfree(d);
2132}
26376a7e 2133
f833cdb0
DCS
/* Release every registered forcewake domain of this uncore. */
2134static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2135{
2136	struct intel_uncore_forcewake_domain *d;
2137	int tmp;
2138
2139	for_each_fw_domain(d, uncore, tmp)
2140		fw_domain_fini(uncore, d->id);
26376a7e
OM
2141}
2142
5716c8c6
DA
/*
 * Forcewake-get strategy vtables, selected per platform in
 * intel_uncore_fw_domains_init(): with-fallback (gen9+), plain get (VLV/CHV),
 * or get-with-thread-status (SNB/IVB/HSW/BDW).
 */
2143static const struct intel_uncore_fw_get uncore_get_fallback = {
2144	.force_wake_get = fw_domains_get_with_fallback
2145};
2146
2147static const struct intel_uncore_fw_get uncore_get_normal = {
2148	.force_wake_get = fw_domains_get_normal,
2149};
2150
2151static const struct intel_uncore_fw_get uncore_get_thread_status = {
2152	.force_wake_get = fw_domains_get_with_thread_status
2153};
2154
/*
 * Create the per-platform set of forcewake domains and pick the matching
 * fw-get strategy. The local fw_domain_init() wrapper latches the first
 * error via 'ret' so later calls become no-ops once one init has failed;
 * on error all partially-created domains are torn down. Returns 0 or a
 * negative errno.
 */
f833cdb0 2155static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
0b274481 2156{
01385758 2157	struct drm_i915_private *i915 = uncore->i915;
f833cdb0 2158	int ret = 0;
f7de5027 2159
2e81bc61 2160	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
3225b2f9 2161
f833cdb0
DCS
2162#define fw_domain_init(uncore__, id__, set__, ack__) \
2163	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
2164
651e7d48 2165	if (GRAPHICS_VER(i915) >= 11) {
14f2f9bf 2166		intel_engine_mask_t emask;
a89a70a8
DCS
2167		int i;
2168
14f2f9bf
MR
2169		/* we'll prune the domains of missing engines later */
2170		emask = uncore->gt->info.engine_mask;
2171
5716c8c6 2172		uncore->fw_get_funcs = &uncore_get_fallback;
14f2f9bf
MR
2173		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2174			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2175				       FORCEWAKE_GT_GEN9,
2176				       FORCEWAKE_ACK_GT_MTL);
2177		else
2178			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2179				       FORCEWAKE_GT_GEN9,
2180				       FORCEWAKE_ACK_GT_GEN9);
2181
2182		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2183			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2184				       FORCEWAKE_RENDER_GEN9,
2185				       FORCEWAKE_ACK_RENDER_GEN9);
f833cdb0 2186
a89a70a8 2187		for (i = 0; i < I915_MAX_VCS; i++) {
242613af 2188			if (!__HAS_ENGINE(emask, _VCS(i)))
a89a70a8
DCS
2189				continue;
2190
f7de5027 2191			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
a89a70a8
DCS
2192				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2193				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2194		}
2195		for (i = 0; i < I915_MAX_VECS; i++) {
242613af 2196			if (!__HAS_ENGINE(emask, _VECS(i)))
a89a70a8
DCS
2197				continue;
2198
f7de5027 2199			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
a89a70a8
DCS
2200				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2201				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2202		}
14f2f9bf
MR
2203
2204		if (uncore->gt->type == GT_MEDIA)
2205			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2206				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
651e7d48 2207	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
5716c8c6 2208		uncore->fw_get_funcs = &uncore_get_fallback;
f7de5027 2209		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15
MK
2210			       FORCEWAKE_RENDER_GEN9,
2211			       FORCEWAKE_ACK_RENDER_GEN9);
55e3c170
MR
2212		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2213			       FORCEWAKE_GT_GEN9,
2214			       FORCEWAKE_ACK_GT_GEN9);
f7de5027 2215		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
05a2fb15 2216			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
f7de5027 2217	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
5716c8c6 2218		uncore->fw_get_funcs = &uncore_get_normal;
f7de5027 2219		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 2220			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
f7de5027 2221		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
05a2fb15 2222			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
f7de5027 2223	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5716c8c6 2224		uncore->fw_get_funcs = &uncore_get_thread_status;
f7de5027 2225		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 2226			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
f7de5027 2227	} else if (IS_IVYBRIDGE(i915)) {
0b274481
BW
2228		u32 ecobus;
2229
2230		/* IVB configs may use multi-threaded forcewake */
2231
2232		/* A small trick here - if the bios hasn't configured
2233		 * MT forcewake, and if the device is in RC6, then
2234		 * force_wake_mt_get will not wake the device and the
2235		 * ECOBUS read will return zero. Which will be
2236		 * (correctly) interpreted by the test below as MT
2237		 * forcewake being disabled.
2238		 */
5716c8c6 2239		uncore->fw_get_funcs = &uncore_get_thread_status;
05a2fb15 2240
f9b3927a
MK
2241		/* We need to init first for ECOBUS access and then
2242		 * determine later if we want to reinit, in case of MT access is
6ea2556f
MK
2243		 * not working. In this stage we don't know which flavour this
2244		 * ivb is, so it is better to reset also the gen6 fw registers
2245		 * before the ecobus check.
f9b3927a 2246		 */
6ea2556f 2247
6cc5ca76 2248		__raw_uncore_write32(uncore, FORCEWAKE, 0);
6ebc9692 2249		__raw_posting_read(uncore, ECOBUS);
6ea2556f 2250
f833cdb0
DCS
2251		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2252				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2253		if (ret)
2254			goto out;
f9b3927a 2255
f7de5027
DCS
2256		spin_lock_irq(&uncore->lock);
2257		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
6cc5ca76 2258		ecobus = __raw_uncore_read32(uncore, ECOBUS);
f7de5027
DCS
2259		fw_domains_put(uncore, FORCEWAKE_RENDER);
2260		spin_unlock_irq(&uncore->lock);
0b274481 2261
05a2fb15 2262		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
d0208cfa
WK
2263			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2264			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
f833cdb0 2265			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
f7de5027 2266			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 2267				       FORCEWAKE, FORCEWAKE_ACK);
0b274481 2268		}
651e7d48 2269	} else if (GRAPHICS_VER(i915) == 6) {
5716c8c6 2270		uncore->fw_get_funcs = &uncore_get_thread_status;
f7de5027 2271		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
05a2fb15 2272			       FORCEWAKE, FORCEWAKE_ACK);
0b274481 2273	}
3225b2f9 2274
f833cdb0
DCS
2275#undef fw_domain_init
2276
3225b2f9 2277	/* All future platforms are expected to require complex power gating */
48a1b8d4 2278	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
f833cdb0
DCS
2279
2280out:
2281	if (ret)
2282		intel_uncore_fw_domains_fini(uncore);
2283
2284	return ret;
f9b3927a
MK
2285}
2286
/* Install a forcewake-range lookup table / shadowed-register table with its
 * entry count; sized via ARRAY_SIZE so the argument must be a real array. */
f7de5027 2287#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
15157970 2288{ \
f7de5027 2289	(uncore)->fw_domains_table = \
15157970 2290			(struct intel_forcewake_range *)(d); \
f7de5027 2291	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
15157970
TU
2292}
2293
6cdbb101
MR
2294#define ASSIGN_SHADOW_TABLE(uncore, d) \
2295{ \
2296	(uncore)->shadowed_reg_table = d; \
2297	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2298}
2299
264ec1a8
HG
/*
 * PMIC bus access notifier: hold all forcewake domains for the duration of
 * a shared-PMIC-i2c-bus transaction so no forcewake ack is needed while the
 * punit cannot service one. Always returns NOTIFY_OK.
 */
2300static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2301					 unsigned long action, void *data)
2302{
9102650f
DCS
2303	struct intel_uncore *uncore = container_of(nb,
2304			struct intel_uncore, pmic_bus_access_nb);
264ec1a8
HG
2305
2306	switch (action) {
2307	case MBI_PMIC_BUS_ACCESS_BEGIN:
2308		/*
2309		 * forcewake all now to make sure that we don't need to do a
2310		 * forcewake later which on systems where this notifier gets
2311		 * called requires the punit to access to the shared pmic i2c
2312		 * bus, which will be busy after this notification, leading to:
2313		 * "render: timed out waiting for forcewake ack request."
2314		 * errors.
ce30560c
HG
2315		 *
2316		 * The notifier is unregistered during intel_runtime_suspend(),
2317		 * so it's ok to access the HW here without holding a RPM
2318		 * wake reference -> disable wakeref asserts for the time of
2319		 * the access.
264ec1a8 2320		 */
9102650f
DCS
2321		disable_rpm_wakeref_asserts(uncore->rpm);
2322		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2323		enable_rpm_wakeref_asserts(uncore->rpm);
264ec1a8
HG
2324		break;
2325	case MBI_PMIC_BUS_ACCESS_END:
9102650f 2326		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
264ec1a8
HG
2327		break;
2328	}
2329
2330	return NOTIFY_OK;
2331}
2332
9ebb80e8
MR
/* drmm cleanup action: unmap the MMIO BAR mapped in intel_uncore_setup_mmio(). */
2333static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2334{
70994bec 2335	iounmap((void __iomem *)regs);
9ebb80e8
MR
2336}
2337
/*
 * Map the register BAR at @phys_addr into uncore->regs, sizing the mapping
 * per platform generation, and register a drm-managed action to unmap it.
 * Returns 0 on success or a negative errno.
 */
bec68cc9 2338int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
25286aac 2339{
01385758 2340	struct drm_i915_private *i915 = uncore->i915;
25286aac
DCS
2341	int mmio_size;
2342
25286aac
DCS
2343	/*
2344	 * Before gen4, the registers and the GTT are behind different BARs.
2345	 * However, from gen4 onwards, the registers and the GTT are shared
2346	 * in the same BAR, so we want to restrict this ioremap from
2347	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
2348	 * the register BAR remains the same size for all the earlier
2349	 * generations up to Ironlake.
da30390b
MR
2350	 * For dgfx chips register range is expanded to 4MB, and this larger
2351	 * range is also used for integrated gpus beginning with Meteor Lake.
2352	 */
da30390b 2353	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
eafeb204 2354		mmio_size = 4 * 1024 * 1024;
da30390b 2355	else if (GRAPHICS_VER(i915) >= 5)
25286aac 2356		mmio_size = 2 * 1024 * 1024;
da30390b
MR
2357	else
2358		mmio_size = 512 * 1024;
eafeb204 2359
bec68cc9 2360	uncore->regs = ioremap(phys_addr, mmio_size);
25286aac 2361	if (uncore->regs == NULL) {
d0208cfa 2362		drm_err(&i915->drm, "failed to map registers\n");
25286aac
DCS
2363		return -EIO;
2364	}
2365
70994bec
JN
2366	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2367					(void __force *)uncore->regs);
25286aac
DCS
2368}
2369
/* Early, allocation-free init: wire the uncore to its GT/i915/runtime-pm. */
01385758 2370void intel_uncore_init_early(struct intel_uncore *uncore,
030def2c 2371			     struct intel_gt *gt)
6cbe8830
DCS
2372{
2373	spin_lock_init(&uncore->lock);
030def2c
MW
2374	uncore->i915 = gt->i915;
2375	uncore->gt = gt;
2376	uncore->rpm = &gt->i915->runtime_pm;
6cbe8830 2377}
25286aac 2378
/*
 * Install direct (forcewake-less) MMIO accessors: vgpu variants under a
 * hypervisor, gen5 (ILK dummy-write) for gen5, plain gen2 otherwise.
 */
2e81bc61 2379static void uncore_raw_init(struct intel_uncore *uncore)
f9b3927a 2380{
2e81bc61 2381	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
25286aac 2382
0e65ce24
CW
2383	if (intel_vgpu_active(uncore->i915)) {
2384		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2385		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
651e7d48 2386	} else if (GRAPHICS_VER(uncore->i915) == 5) {
2e81bc61
DCS
2387		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2388		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2389	} else {
2390		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2391		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2392	}
2393}
f7de5027 2394
14f2f9bf
MR
/*
 * Forcewake setup for a standalone media GT: only Xe_LPM+ (media ver >= 13)
 * is supported; anything else is a driver bug (-ENODEV).
 */
2395static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2396{
2397	struct drm_i915_private *i915 = uncore->i915;
2398
2399	if (MEDIA_VER(i915) >= 13) {
2400		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2401		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2402		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2403	} else {
2404		MISSING_CASE(MEDIA_VER(i915));
2405		return -ENODEV;
2406	}
2407
2408	return 0;
2409}
2410
/*
 * Full forcewake bring-up: create the fw domains, pick the per-platform
 * range/shadow tables and fwtable accessors (fwtable reads on all paths;
 * gen6-style writes on pre-gen8), then register the PMIC bus notifier.
 * Returns 0 or a negative errno from domain init.
 */
f833cdb0 2411static int uncore_forcewake_init(struct intel_uncore *uncore)
2e81bc61
DCS
2412{
2413	struct drm_i915_private *i915 = uncore->i915;
f833cdb0 2414	int ret;
cf9d2890 2415
2e81bc61 2416	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
5a0ba777 2417
f833cdb0
DCS
2418	ret = intel_uncore_fw_domains_init(uncore);
2419	if (ret)
2420		return ret;
2e81bc61 2421	forcewake_early_sanitize(uncore, 0);
75714940 2422
54fc4f13
MR
2423	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2424
14f2f9bf
MR
2425	if (uncore->gt->type == GT_MEDIA)
2426		return uncore_media_forcewake_init(uncore);
2427
2428	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2429		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2430		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2431		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
fb289464 2432	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
e0531636 2433		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
c74e66d4 2434		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
aef02736 2435		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b 2436	} else if (GRAPHICS_VER(i915) >= 12) {
cf82d9dd 2437		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
6cdbb101 2438		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
aef02736 2439		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2440	} else if (GRAPHICS_VER(i915) == 11) {
2441		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
6cdbb101 2442		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
aef02736 2443		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2444	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2445		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
6cdbb101 2446		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
bfac1e2b 2447		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2448	} else if (IS_CHERRYVIEW(i915)) {
2449		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
6cdbb101 2450		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
bfac1e2b 2451		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b 2452	} else if (GRAPHICS_VER(i915) == 8) {
1ab2b4cd 2453		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
6cdbb101 2454		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
09b2a597 2455		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
bfac1e2b
MR
2456	} else if (IS_VALLEYVIEW(i915)) {
2457		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2458		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
bfac1e2b 2459	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
1ab2b4cd 2460		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
bfac1e2b 2461		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
3967018e 2462	}
ed493883 2463
2e81bc61
DCS
2464	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2465	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
f833cdb0
DCS
2466
2467	return 0;
2e81bc61
DCS
2468}
2469
de414973
MR
/*
 * Verify the PCI device answers MMIO at all (a dead link reads back all
 * 1's); poll a masked register for up to 2s before giving up with -EIO.
 * Skipped on pre-gen8 where FORCEWAKE_MT may not exist.
 */
2470static int sanity_check_mmio_access(struct intel_uncore *uncore)
2471{
2472	struct drm_i915_private *i915 = uncore->i915;
2473
2474	if (GRAPHICS_VER(i915) < 8)
2475		return 0;
2476
2477	/*
2478	 * Sanitycheck that MMIO access to the device is working properly. If
381ab12d 2479	 * the CPU is unable to communicate with a PCI device, BAR reads will
de414973
MR
2480	 * return 0xFFFFFFFF. Let's make sure the device isn't in this state
2481	 * before we start trying to access registers.
2482	 *
2483	 * We use the primary GT's forcewake register as our guinea pig since
2484	 * it's been around since HSW and it's a masked register so the upper
2485	 * 16 bits can never read back as 1's if device access is operating
2486	 * properly.
2487	 *
2488	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2489	 * recovers, then give up.
2490	 */
2491#define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2492	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2493		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2494		return -EIO;
2495	}
2496
2497	return 0;
2498}
2499
2e81bc61
DCS
/**
 * intel_uncore_init_mmio - initialize register access for an uncore
 * @uncore: the struct intel_uncore to initialize
 *
 * Verifies that MMIO access is operational, selects raw vs. forcewake-based
 * register access for this device and records the capability flags
 * (forcewake, FPGA_DBG, DBG_UNCLAIMED, FIFO) that later accessors consult.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/* Bail out early if the device cannot even complete a register read. */
	ret = sanity_check_mmio_access(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	/* Gen5 and earlier have no forcewake; a vGPU hides it from the guest. */
	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}
2552
26376a7e
OM
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	/* Only gen11+ parts with forcewake have per-engine media domains to prune. */
	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	/* Drop VDBOX domains whose engine is fused off. */
	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	/* Drop VEBOX domains whose engine is fused off. */
	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	/* The GSC domain is only needed when the GSC engine is present. */
	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
}
2607
5a44fcd7
DCS
/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of i915 is bound to the device it will do a full
 * re-init anyway.
 */
static void driver_initiated_flr(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	unsigned int flr_timeout_ms;
	int ret;

	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");

	/*
	 * The specification recommends a 3 seconds FLR reset timeout. To be
	 * cautious, we will extend this to 9 seconds, three times the specified
	 * timeout.
	 */
	flr_timeout_ms = 9000;

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens
	 */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm,
			"Failed to wait for Driver-FLR bit to clear! %d\n",
			ret);
		return;
	}
	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
					 DRIVERFLR, 0,
					 flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
					 flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
2678
cfb0fa42
MR
/* Called via drm-managed action */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
	struct intel_uncore *uncore = data;

	/*
	 * Undo forcewake setup: drop the PMIC bus-access notifier registered
	 * during init and release the forcewake domains, all under the punit
	 * lock so we don't race punit-side accesses during teardown.
	 */
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	/* Last action before we give up HW access; no recovery is attempted. */
	if (intel_uncore_needs_flr_on_fini(uncore))
		driver_initiated_flr(uncore);
}
2696
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
/* Reads the register and latches the raw value for @out_value as a side effect. */
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	/* Busy-poll first (atomic-safe), then fall back to a sleeping wait. */
	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	/* Hand back the last value read, even on timeout. */
	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
2752
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	/* Fast phase: poll with forcewake held under the uncore lock. */
	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	/* Slow phase: sleep-wait without holding forcewake for the duration. */
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								      reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
2811
2cf7bf6f 2812bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
907b28c5 2813{
0a9b2630
DCS
2814 bool ret;
2815
639e30ee
MR
2816 if (!uncore->debug)
2817 return false;
2818
0a9b2630
DCS
2819 spin_lock_irq(&uncore->debug->lock);
2820 ret = check_for_unclaimed_mmio(uncore);
2821 spin_unlock_irq(&uncore->debug->lock);
2822
2823 return ret;
907b28c5 2824}
75714940 2825
/*
 * Check for an unclaimed MMIO access and, if one is found while the check
 * budget (unclaimed_mmio_check) is still positive, bump the mmio_debug
 * modparam to enable oneshot unclaimed-register reporting.
 *
 * Returns true iff an unclaimed access was detected by this call.
 */
bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
		return false;

	spin_lock_irq(&uncore->debug->lock);

	/* Detection budget exhausted; stop re-arming. */
	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}
3756685a 2856
3756685a
TU
2857/**
2858 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2859 * a register
4319382e 2860 * @uncore: pointer to struct intel_uncore
3756685a
TU
2861 * @reg: register in question
2862 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2863 *
2864 * Returns a set of forcewake domains required to be taken with for example
2865 * intel_uncore_forcewake_get for the specified register to be accessible in the
2866 * specified mode (read, write or read/write) with raw mmio accessors.
2867 *
2868 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2869 * callers to do FIFO management on their own or risk losing writes.
2870 */
2871enum forcewake_domains
4319382e 2872intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
3756685a
TU
2873 i915_reg_t reg, unsigned int op)
2874{
2875 enum forcewake_domains fw_domains = 0;
2876
a9f236d1 2877 drm_WARN_ON(&uncore->i915->drm, !op);
3756685a 2878
4319382e 2879 if (!intel_uncore_has_forcewake(uncore))
895833bd
TU
2880 return 0;
2881
3756685a 2882 if (op & FW_REG_READ)
ccb2acea 2883 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
3756685a
TU
2884
2885 if (op & FW_REG_WRITE)
ccb2acea
DCS
2886 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2887
a9f236d1 2888 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
3756685a
TU
2889
2890 return fw_domains;
2891}
26e7a2a1
CW
2892
2893#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0757ac8f 2894#include "selftests/mock_uncore.c"
26e7a2a1
CW
2895#include "selftests/intel_uncore.c"
2896#endif