/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_vgpu.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here actually exists at this point (engines could be
	 * fused off in ICL+), so no waiting for acks.
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
	hrtimer_start_range_ns(&d->timer,

__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);

wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	return __wait_for_ack(d, ack, 0);

wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	return __wait_for_ack(d, ack, ack);

fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */

fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	/*
	 * There is a possibility of the driver's wake request colliding with
	 * the hardware's own wake requests, and that can cause the hardware
	 * to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine in the
	 * hope that the original ack will be delivered along with it.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
		ack_detected = (fw_ack(d) & ack_bit) == value;
		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",

	return ack_detected ? 0 : -ETIMEDOUT;
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);

fw_domain_get(const struct intel_uncore_forcewake_domain *d)
	fw_set(d, FORCEWAKE_KERNEL);

fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */

fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);

fw_domain_put(const struct intel_uncore_forcewake_domain *d)
	fw_clear(d, FORCEWAKE_KERNEL);

fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;

fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;

fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)

	uncore->fw_domains_active &= ~fw_domains;

fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)

static inline u32 gt_thread_status(struct intel_uncore *uncore)
	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);

	uncore->fifo_count = n - 1;
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
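/*
 * An illustrative sketch (mirroring intel_uncore_suspend() below) of the
 * expected calling pattern:
 *
 *	iosf_mbi_punit_acquire();
 *	fw = intel_uncore_forcewake_reset(uncore);
 *	iosf_mbi_punit_release();
 */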
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;

		if (active_domains == 0)

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");

		spin_unlock_irqrestore(&uncore->lock, irqflags);

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */

fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

gen6_check_for_fifo_debug(struct intel_uncore *uncore)
	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
464 DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);

check_for_unclaimed_mmio(struct intel_uncore *uncore)
	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);

	iosf_mbi_punit_release();

void intel_uncore_suspend(struct intel_uncore *uncore)
	if (!intel_uncore_has_forcewake(uncore))

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();

void intel_uncore_resume_early(struct intel_uncore *uncore)
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
	if (!intel_uncore_has_forcewake(uncore))

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
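/*
 * Illustrative example (not taken from this file); "reg" and "val" are
 * placeholders. Keep the render well awake across a sequence of raw accesses:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	intel_uncore_write_fw(uncore, reg, val);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */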
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		uncore->user_forcewake.saved_mmio_check =
			uncore->unclaimed_mmio_check;
		uncore->user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		uncore->unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;

	spin_unlock_irq(&uncore->lock);

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
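/*
 * A sketch of the intended pairing, e.g. around a debugfs-driven userspace
 * bypass (illustrative only):
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	... userspace pokes registers directly ...
 *	intel_uncore_forcewake_user_put(uncore);
 */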
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(uncore))
			dev_info(uncore->i915->drm.dev,
				 "Invalid mmio detected during user access\n");

		uncore->unclaimed_mmio_check =
			uncore->user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			uncore->user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);

	spin_unlock_irq(&uncore->lock);

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
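/*
 * Illustrative sketch of the locked variants; the caller owns uncore->lock
 * for the whole sequence:
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_RENDER);
 *	... raw register accesses ...
 *	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_RENDER);
 *	spin_unlock_irq(&uncore->lock);
 */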
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)

	__intel_uncore_forcewake_get(uncore, fw_domains);

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (WARN_ON(domain->wake_count == 0))

		if (--domain->wake_count) {
			domain->active = true;

		fw_domain_arm_timer(domain);

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)

	__intel_uncore_forcewake_put(uncore, fw_domains);

void assert_forcewakes_inactive(struct intel_uncore *uncore)
	if (!uncore->funcs.force_wake_get)

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *domain;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))

	if (!uncore->funcs.force_wake_get)

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int expect = 1;

		if (hrtimer_active(&domain->timer) && READ_ONCE(domain->active))
			expect++; /* pending automatic release */

		if (WARN(domain->wake_count < expect,
			 "Expected domain %d to be held awake by caller, count=%d\n",
			 domain->id, domain->wake_count))

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
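/*
 * For example, engine registers such as RING_TAIL (0x2030 for the render
 * ring) sit below 0x40000 and need forcewake, while display registers at
 * 0x40000 and above do not; on gen11 the per-engine media ranges at
 * 0x1c0000-0x1dbfff need it again.
 */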
#define __gen6_reg_read_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
	if (offset < entry->start)
	else if (offset > entry->end)

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
			result__ = (base) + mid__; \

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
	u32 offset = i915_mmio_reg_offset(*reg);
	else if (key > offset)

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \

__is_genX_shadowed(8)
__is_genX_shadowed(11)
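/* The invocations above expand to is_gen8_shadowed() and is_gen11_shadowed(). */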
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
	return FORCEWAKE_RENDER;

#define __gen8_reg_write_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};
ilk_dummy_write(struct intel_uncore *uncore)
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);

__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;

unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
	if (likely(!i915_modparams.mmio_debug))

	__unclaimed_reg_debug(uncore, reg, read, before);
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \

#define __gen5_read(x) \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
	struct intel_uncore_forcewake_domain *domain;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
	if (WARN_ON(!fw_domains))

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);

#define __gen_read(func, x) \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \

__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \

#define __gen5_write(x) \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \

#define __gen_write(func, x) \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \

#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \

__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_load_failure())

	d = kzalloc(sizeof(*d), GFP_KERNEL);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	uncore->fw_domain[domain_id] = d;

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);

	uncore->fw_domains &= ~BIT(domain_id);
	WARN_ON(d->wake_count);
	WARN_ON(hrtimer_cancel(&d->timer));

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
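/*
 * The ?: above short-circuits once a failure has been recorded: if ret is
 * already non-zero it is returned as-is, otherwise __fw_domain_init() runs
 * and its status is latched into ret, so later calls after a failure
 * become no-ops.
 */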
	if (INTEL_GEN(i915) >= 11) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));

		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));

	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		/* IVB configs may use multi-threaded forcewake */

		/*
		 * A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be (correctly)
		 * interpreted by the test below as MT forcewake being
		 * disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/*
		 * We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which flavour
		 * this ivb is, so it is better to reset also the gen6 fw
		 * registers before the ecobus check.
		 */
		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);

#undef fw_domain_init
	/* All future platforms are expected to require complex power gating */
	WARN_ON(!ret && uncore->fw_domains == 0);

	intel_uncore_fw_domains_fini(uncore);

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
	struct intel_uncore *uncore = container_of(nb,
		struct intel_uncore, pmic_bus_access_nb);
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

static int uncore_mmio_setup(struct intel_uncore *uncore)
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want mapped with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
	struct pci_dev *pdev = uncore->i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;

static void uncore_raw_init(struct intel_uncore *uncore)
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (IS_GEN(uncore->i915, 5)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);

static int uncore_forcewake_init(struct intel_uncore *uncore)
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);

	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);

	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);

	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

int intel_uncore_init_mmio(struct intel_uncore *uncore)
	struct drm_i915_private *i915 = uncore->i915;

	ret = uncore_mmio_setup(uncore);

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	uncore->unclaimed_mmio_check = 1;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
		ret = uncore_forcewake_init(uncore);
			goto out_mmio_cleanup;

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	uncore_mmio_cleanup(uncore);
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
	struct drm_i915_private *i915 = uncore->i915;
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;

	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(i915, _VCS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(i915, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();

	uncore_mmio_cleanup(uncore);

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
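/*
 * Illustrative example (ACK_BIT is a placeholder): poll for an ack bit for
 * up to 500us while already holding the needed forcewake:
 *
 *	err = __intel_wait_for_register_fw(uncore, reg, ACK_BIT, ACK_BIT,
 *					   500, 0, NULL);
 */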
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

		*out_value = reg_value;
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
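/*
 * Illustrative example (reg/mask/value are placeholders): try a 100us
 * atomic fast path, then fall back to a sleeping wait of up to 50ms:
 *
 *	err = __intel_wait_for_register(uncore, reg, mask, value,
 *					100, 50, NULL);
 */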
int __intel_wait_for_register(struct intel_uncore *uncore,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

		*out_value = reg_value;

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
	return check_for_unclaimed_mmio(uncore);

intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
	spin_lock_irq(&uncore->lock);

	if (unlikely(uncore->unclaimed_mmio_check <= 0))

	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;

		uncore->unclaimed_mmio_check--;

	spin_unlock_irq(&uncore->lock);
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns the set of forcewake domains required to be taken (with, for
 * example, intel_uncore_forcewake_get()) for the specified register to be
 * accessible in the specified mode (read, write or read/write) with raw
 * mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires the callers to do FIFO management on their own or risk losing
 * writes.
 */
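/*
 * Illustrative example: take exactly the domains needed for a raw write
 * (reg/val are placeholders):
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_WRITE);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	intel_uncore_write_fw(uncore, reg, val);
 *	intel_uncore_forcewake_put(uncore, fw);
 */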
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
	enum forcewake_domains fw_domains = 0;

	if (!intel_uncore_has_forcewake(uncore))

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	WARN_ON(fw_domains & ~uncore->fw_domains);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif