2 * Copyright © 2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include <linux/pm_runtime.h>
25 #include <asm/iosf_mbi.h>
27 #include "gt/intel_lrc_reg.h" /* for shadow reg list */
30 #include "i915_trace.h"
31 #include "i915_vgpu.h"
34 #define FORCEWAKE_ACK_TIMEOUT_MS 50
35 #define GT_FIFO_TIMEOUT_MS 10
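/*
 * A "posting read" flushes preceding MMIO writes out to the device by
 * forcing a round trip on the same path; the value read back is discarded.
 */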
37 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
40 intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
42 spin_lock_init(&mmio_debug->lock);
43 mmio_debug->unclaimed_mmio_check = 1;
46 static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
48 lockdep_assert_held(&mmio_debug->lock);
50 /* Save and disable mmio debugging for the user bypass */
51 if (!mmio_debug->suspend_count++) {
52 mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
53 mmio_debug->unclaimed_mmio_check = 0;
57 static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
59 lockdep_assert_held(&mmio_debug->lock);
61 if (!--mmio_debug->suspend_count)
62 mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
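/*
 * The suspend/resume pair above nests: unclaimed-mmio checking stays
 * disabled while any user bypass is active and is only restored when the
 * last bypass completes.
 */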
65 static const char * const forcewake_domain_names[] = {
84 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
86 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
88 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
89 return forcewake_domain_names[id];
96 #define fw_ack(d) readl((d)->reg_ack)
97 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
98 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
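/*
 * Note on the helpers above: these write "masked" registers, where the
 * upper 16 bits of the value select which of the lower 16 bits take
 * effect, so no read-modify-write cycle is needed.
 */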
101 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
104 * We don't really know if the powerwell for the forcewake domain we are
105 * trying to reset here exists at this point (engines could be fused
106 * off in ICL+), so no waiting for acks.
108 /* WaRsClearFWBitsAtReset:bdw,skl */
113 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
115 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
116 d->uncore->fw_domains_timer |= d->mask;
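/* Release lazily: the timer lets back-to-back accesses reuse the wakeref. */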
118 hrtimer_start_range_ns(&d->timer,
125 __wait_for_ack(const struct intel_uncore_forcewake_domain *d,
129 return wait_for_atomic((fw_ack(d) & ack) == value,
130 FORCEWAKE_ACK_TIMEOUT_MS);
134 wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
137 return __wait_for_ack(d, ack, 0);
141 wait_ack_set(const struct intel_uncore_forcewake_domain *d,
144 return __wait_for_ack(d, ack, ack);
148 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
150 if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
151 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
152 intel_uncore_forcewake_domain_to_str(d->id));
153 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
163 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
164 const enum ack_type type)
166 const u32 ack_bit = FORCEWAKE_KERNEL;
167 const u32 value = type == ACK_SET ? ack_bit : 0;
172 * There is a possibility of the driver's wake request colliding
173 * with the hardware's own wake requests and that can cause
174 * the hardware to not deliver the driver's ack message.
176 * Use a fallback bit toggle to kick the gpu state machine
177 * in the hope that the original ack will be delivered along with the fallback ack.
180 * This workaround is described in HSDES #1604254524 and it's known as:
181 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
182 * although the name is a bit misleading.
187 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
189 fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
190 /* Give gt some time to relax before the polling frenzy */
192 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
194 ack_detected = (fw_ack(d) & ack_bit) == value;
196 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
197 } while (!ack_detected && pass++ < 10);
199 DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
200 intel_uncore_forcewake_domain_to_str(d->id),
201 type == ACK_SET ? "set" : "clear",
205 return ack_detected ? 0 : -ETIMEDOUT;
209 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
211 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
214 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
215 fw_domain_wait_ack_clear(d);
219 fw_domain_get(const struct intel_uncore_forcewake_domain *d)
221 fw_set(d, FORCEWAKE_KERNEL);
225 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
227 if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
228 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
229 intel_uncore_forcewake_domain_to_str(d->id));
230 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
235 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
237 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
240 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
241 fw_domain_wait_ack_set(d);
245 fw_domain_put(const struct intel_uncore_forcewake_domain *d)
247 fw_clear(d, FORCEWAKE_KERNEL);
251 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
253 struct intel_uncore_forcewake_domain *d;
256 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
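/*
 * Two-phase handshake per domain: wait for any previous release to
 * complete (ack clear), request the wake, then wait for the ack to be set.
 */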
258 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
259 fw_domain_wait_ack_clear(d);
263 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
264 fw_domain_wait_ack_set(d);
266 uncore->fw_domains_active |= fw_domains;
270 fw_domains_get_with_fallback(struct intel_uncore *uncore,
271 enum forcewake_domains fw_domains)
273 struct intel_uncore_forcewake_domain *d;
276 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
278 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
279 fw_domain_wait_ack_clear_fallback(d);
283 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
284 fw_domain_wait_ack_set_fallback(d);
286 uncore->fw_domains_active |= fw_domains;
290 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
292 struct intel_uncore_forcewake_domain *d;
295 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
297 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
300 uncore->fw_domains_active &= ~fw_domains;
304 fw_domains_reset(struct intel_uncore *uncore,
305 enum forcewake_domains fw_domains)
307 struct intel_uncore_forcewake_domain *d;
313 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
315 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
319 static inline u32 gt_thread_status(struct intel_uncore *uncore)
323 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
324 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
329 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
332 * w/a for a sporadic read returning 0 by waiting for the GT thread to wake up.
335 drm_WARN_ONCE(&uncore->i915->drm,
336 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
337 "GT thread status wait timed out\n");
340 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
341 enum forcewake_domains fw_domains)
343 fw_domains_get(uncore, fw_domains);
345 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
346 __gen6_gt_wait_for_thread_c0(uncore);
349 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
351 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
353 return count & GT_FIFO_FREE_ENTRIES_MASK;
356 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
360 /* On VLV, the FIFO is shared by both SW and HW.
361 * So, we need to read the FREE_ENTRIES every time */
362 if (IS_VALLEYVIEW(uncore->i915))
363 n = fifo_free_entries(uncore);
365 n = uncore->fifo_count;
367 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
368 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
369 GT_FIFO_NUM_RESERVED_ENTRIES,
370 GT_FIFO_TIMEOUT_MS)) {
371 drm_dbg(&uncore->i915->drm,
372 "GT_FIFO timeout, entries: %u\n", n);
377 uncore->fifo_count = n - 1;
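/* The caller is about to perform a write, which consumes one FIFO entry. */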
380 static enum hrtimer_restart
381 intel_uncore_fw_release_timer(struct hrtimer *timer)
383 struct intel_uncore_forcewake_domain *domain =
384 container_of(timer, struct intel_uncore_forcewake_domain, timer);
385 struct intel_uncore *uncore = domain->uncore;
386 unsigned long irqflags;
388 assert_rpm_device_not_suspended(uncore->rpm);
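/*
 * If the domain was used again while the timer was pending, keep the
 * wakeref and restart the timer instead of releasing it.
 */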
390 if (xchg(&domain->active, false))
391 return HRTIMER_RESTART;
393 spin_lock_irqsave(&uncore->lock, irqflags);
395 uncore->fw_domains_timer &= ~domain->mask;
397 GEM_BUG_ON(!domain->wake_count);
398 if (--domain->wake_count == 0)
399 uncore->funcs.force_wake_put(uncore, domain->mask);
401 spin_unlock_irqrestore(&uncore->lock, irqflags);
403 return HRTIMER_NORESTART;
406 /* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
408 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
410 unsigned long irqflags;
411 struct intel_uncore_forcewake_domain *domain;
412 int retry_count = 100;
413 enum forcewake_domains fw, active_domains;
415 iosf_mbi_assert_punit_acquired();
417 /* Hold uncore.lock across reset to prevent any register access
418 * with forcewake not set correctly. Wait until all pending
419 * timers have run before holding.
426 for_each_fw_domain(domain, uncore, tmp) {
427 smp_store_mb(domain->active, false);
428 if (hrtimer_cancel(&domain->timer) == 0)
431 intel_uncore_fw_release_timer(&domain->timer);
434 spin_lock_irqsave(&uncore->lock, irqflags);
436 for_each_fw_domain(domain, uncore, tmp) {
437 if (hrtimer_active(&domain->timer))
438 active_domains |= domain->mask;
441 if (active_domains == 0)
444 if (--retry_count == 0) {
445 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
449 spin_unlock_irqrestore(&uncore->lock, irqflags);
453 drm_WARN_ON(&uncore->i915->drm, active_domains);
455 fw = uncore->fw_domains_active;
457 uncore->funcs.force_wake_put(uncore, fw);
459 fw_domains_reset(uncore, uncore->fw_domains);
460 assert_forcewakes_inactive(uncore);
462 spin_unlock_irqrestore(&uncore->lock, irqflags);
464 return fw; /* track the lost user forcewake domains */
468 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
472 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
473 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
477 * Bugs in PCI programming (or failing hardware) can occasionally cause
478 * us to lose access to the MMIO BAR. When this happens, register
479 * reads will come back with 0xFFFFFFFF for every register and things
480 * go bad very quickly. Let's try to detect that special case and at
481 * least try to print a more informative message about what has happened.
484 * During normal operation the FPGA_DBG register has several unused
485 * bits that will always read back as 0's so we can use them as canaries
486 * to recognize when MMIO accesses are just busted.
488 if (unlikely(dbg == ~0))
489 drm_err(&uncore->i915->drm,
490 "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
492 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
498 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
502 cer = __raw_uncore_read32(uncore, CLAIM_ER);
503 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
506 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
512 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
516 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
518 if (unlikely(fifodbg)) {
519 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
520 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
527 check_for_unclaimed_mmio(struct intel_uncore *uncore)
531 lockdep_assert_held(&uncore->debug->lock);
533 if (uncore->debug->suspend_count)
536 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
537 ret |= fpga_check_for_unclaimed_mmio(uncore);
539 if (intel_uncore_has_dbg_unclaimed(uncore))
540 ret |= vlv_check_for_unclaimed_mmio(uncore);
542 if (intel_uncore_has_fifo(uncore))
543 ret |= gen6_check_for_fifo_debug(uncore);
548 static void forcewake_early_sanitize(struct intel_uncore *uncore,
549 unsigned int restore_forcewake)
551 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
553 /* WaDisableShadowRegForCpd:chv */
554 if (IS_CHERRYVIEW(uncore->i915)) {
555 __raw_uncore_write32(uncore, GTFIFOCTL,
556 __raw_uncore_read32(uncore, GTFIFOCTL) |
557 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
558 GT_FIFO_CTL_RC6_POLICY_STALL);
561 iosf_mbi_punit_acquire();
562 intel_uncore_forcewake_reset(uncore);
563 if (restore_forcewake) {
564 spin_lock_irq(&uncore->lock);
565 uncore->funcs.force_wake_get(uncore, restore_forcewake);
567 if (intel_uncore_has_fifo(uncore))
568 uncore->fifo_count = fifo_free_entries(uncore);
569 spin_unlock_irq(&uncore->lock);
571 iosf_mbi_punit_release();
574 void intel_uncore_suspend(struct intel_uncore *uncore)
576 if (!intel_uncore_has_forcewake(uncore))
579 iosf_mbi_punit_acquire();
580 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
581 &uncore->pmic_bus_access_nb);
582 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
583 iosf_mbi_punit_release();
586 void intel_uncore_resume_early(struct intel_uncore *uncore)
588 unsigned int restore_forcewake;
590 if (intel_uncore_unclaimed_mmio(uncore))
591 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
593 if (!intel_uncore_has_forcewake(uncore))
596 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
597 forcewake_early_sanitize(uncore, restore_forcewake);
599 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
602 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
604 if (!intel_uncore_has_forcewake(uncore))
607 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
610 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
611 enum forcewake_domains fw_domains)
613 struct intel_uncore_forcewake_domain *domain;
616 fw_domains &= uncore->fw_domains;
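/* Only domains whose refcount goes 0 -> 1 are forwarded to the hardware. */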
618 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
619 if (domain->wake_count++) {
620 fw_domains &= ~domain->mask;
621 domain->active = true;
626 uncore->funcs.force_wake_get(uncore, fw_domains);
630 * intel_uncore_forcewake_get - grab forcewake domain references
631 * @uncore: the intel_uncore structure
632 * @fw_domains: forcewake domains to get reference on
634 * This function can be used to get GT's forcewake domain references.
635 * Normal register access will handle the forcewake domains automatically.
636 * However, if some sequence requires the GT to not power down particular
637 * forcewake domains, this function should be called at the beginning of the
638 * sequence, and the references should subsequently be dropped by a symmetric
639 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
640 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
642 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
643 enum forcewake_domains fw_domains)
645 unsigned long irqflags;
647 if (!uncore->funcs.force_wake_get)
650 assert_rpm_wakelock_held(uncore->rpm);
652 spin_lock_irqsave(&uncore->lock, irqflags);
653 __intel_uncore_forcewake_get(uncore, fw_domains);
654 spin_unlock_irqrestore(&uncore->lock, irqflags);
658 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
659 * @uncore: the intel_uncore structure
661 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
662 * the GT powerwell and in the process disable our debugging for the
663 * duration of userspace's bypass.
665 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
667 spin_lock_irq(&uncore->lock);
668 if (!uncore->user_forcewake_count++) {
669 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
670 spin_lock(&uncore->debug->lock);
671 mmio_debug_suspend(uncore->debug);
672 spin_unlock(&uncore->debug->lock);
674 spin_unlock_irq(&uncore->lock);
678 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
679 * @uncore: the intel_uncore structure
681 * This function complements intel_uncore_forcewake_user_get() and releases
682 * the GT powerwell taken on behalf of the userspace bypass.
684 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
686 spin_lock_irq(&uncore->lock);
687 if (!--uncore->user_forcewake_count) {
688 spin_lock(&uncore->debug->lock);
689 mmio_debug_resume(uncore->debug);
691 if (check_for_unclaimed_mmio(uncore))
692 drm_info(&uncore->i915->drm,
693 "Invalid mmio detected during user access\n");
694 spin_unlock(&uncore->debug->lock);
696 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
698 spin_unlock_irq(&uncore->lock);
702 * intel_uncore_forcewake_get__locked - grab forcewake domain references
703 * @uncore: the intel_uncore structure
704 * @fw_domains: forcewake domains to get reference on
706 * See intel_uncore_forcewake_get(). This variant places the onus
707 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
709 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
710 enum forcewake_domains fw_domains)
712 lockdep_assert_held(&uncore->lock);
714 if (!uncore->funcs.force_wake_get)
717 __intel_uncore_forcewake_get(uncore, fw_domains);
720 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
721 enum forcewake_domains fw_domains)
723 struct intel_uncore_forcewake_domain *domain;
726 fw_domains &= uncore->fw_domains;
728 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
729 GEM_BUG_ON(!domain->wake_count);
731 if (--domain->wake_count) {
732 domain->active = true;
736 uncore->funcs.force_wake_put(uncore, domain->mask);
741 * intel_uncore_forcewake_put - release a forcewake domain reference
742 * @uncore: the intel_uncore structure
743 * @fw_domains: forcewake domains to put references on
745 * This function drops the device-level forcewakes for specified
746 * domains obtained by intel_uncore_forcewake_get().
748 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
749 enum forcewake_domains fw_domains)
751 unsigned long irqflags;
753 if (!uncore->funcs.force_wake_put)
756 spin_lock_irqsave(&uncore->lock, irqflags);
757 __intel_uncore_forcewake_put(uncore, fw_domains);
758 spin_unlock_irqrestore(&uncore->lock, irqflags);
762 * intel_uncore_forcewake_flush - flush the delayed release
763 * @uncore: the intel_uncore structure
764 * @fw_domains: forcewake domains to flush
766 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
767 enum forcewake_domains fw_domains)
769 struct intel_uncore_forcewake_domain *domain;
772 if (!uncore->funcs.force_wake_put)
775 fw_domains &= uncore->fw_domains;
776 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
777 WRITE_ONCE(domain->active, false);
778 if (hrtimer_cancel(&domain->timer))
779 intel_uncore_fw_release_timer(&domain->timer);
784 * intel_uncore_forcewake_put__locked - release forcewake domain references
785 * @uncore: the intel_uncore structure
786 * @fw_domains: forcewake domains to put references on
788 * See intel_uncore_forcewake_put(). This variant places the onus
789 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
791 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
792 enum forcewake_domains fw_domains)
794 lockdep_assert_held(&uncore->lock);
796 if (!uncore->funcs.force_wake_put)
799 __intel_uncore_forcewake_put(uncore, fw_domains);
802 void assert_forcewakes_inactive(struct intel_uncore *uncore)
804 if (!uncore->funcs.force_wake_get)
807 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
808 "Expected all fw_domains to be inactive, but %08x are still on\n",
809 uncore->fw_domains_active);
812 void assert_forcewakes_active(struct intel_uncore *uncore,
813 enum forcewake_domains fw_domains)
815 struct intel_uncore_forcewake_domain *domain;
818 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
821 if (!uncore->funcs.force_wake_get)
824 spin_lock_irq(&uncore->lock);
826 assert_rpm_wakelock_held(uncore->rpm);
828 fw_domains &= uncore->fw_domains;
829 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
830 "Expected %08x fw_domains to be active, but %08x are off\n",
831 fw_domains, fw_domains & ~uncore->fw_domains_active);
834 * Check that the caller has an explicit wakeref and we don't mistake
835 * it for the auto wakeref.
837 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
838 unsigned int actual = READ_ONCE(domain->wake_count);
839 unsigned int expect = 1;
841 if (uncore->fw_domains_timer & domain->mask)
842 expect++; /* pending automatic release */
844 if (drm_WARN(&uncore->i915->drm, actual < expect,
845 "Expected domain %d to be held awake by caller, count=%d\n",
850 spin_unlock_irq(&uncore->lock);
853 /* We give fast paths for the really cool registers */
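/*
 * Registers below 0x40000 live in the GT power well and may need forcewake;
 * higher offsets are display/always-on, except the gen11+ media ranges
 * starting at GEN11_BSD_RING_BASE.
 */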
854 #define NEEDS_FORCE_WAKE(reg) ({ \
856 __reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
859 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
861 if (offset < entry->start)
863 else if (offset > entry->end)
869 /* Copied and "macroized" from lib/bsearch.c */
870 #define BSEARCH(key, base, num, cmp) ({ \
871 unsigned int start__ = 0, end__ = (num); \
872 typeof(base) result__ = NULL; \
873 while (start__ < end__) { \
874 unsigned int mid__ = start__ + (end__ - start__) / 2; \
875 int ret__ = (cmp)((key), (base) + mid__); \
878 } else if (ret__ > 0) { \
879 start__ = mid__ + 1; \
881 result__ = (base) + mid__; \
888 static enum forcewake_domains
889 find_fw_domain(struct intel_uncore *uncore, u32 offset)
891 const struct intel_forcewake_range *entry;
893 entry = BSEARCH(offset,
894 uncore->fw_domains_table,
895 uncore->fw_domains_table_entries,
902 * The list of FW domains depends on the SKU in gen11+ so we
903 * can't determine it statically. We use FORCEWAKE_ALL and
904 * translate it here to the list of available domains.
906 if (entry->domains == FORCEWAKE_ALL)
907 return uncore->fw_domains;
909 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
910 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
911 entry->domains & ~uncore->fw_domains, offset);
913 return entry->domains;
916 #define GEN_FW_RANGE(s, e, d) \
917 { .start = (s), .end = (e), .domains = (d) }
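/* Each entry maps an inclusive MMIO offset range to the forcewake domains
 * that must be awake before registers in that range are touched. */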
919 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
920 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
921 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
922 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
923 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
924 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
925 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
926 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
927 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
930 #define __fwtable_reg_read_fw_domains(uncore, offset) \
932 enum forcewake_domains __fwd = 0; \
933 if (NEEDS_FORCE_WAKE((offset))) \
934 __fwd = find_fw_domain(uncore, offset); \
938 /* *Must* be sorted by offset! See intel_shadow_table_check(). */
939 static const struct i915_range gen8_shadowed_regs[] = {
940 { .start = 0x2030, .end = 0x2030 },
941 { .start = 0xA008, .end = 0xA00C },
942 { .start = 0x12030, .end = 0x12030 },
943 { .start = 0x1a030, .end = 0x1a030 },
944 { .start = 0x22030, .end = 0x22030 },
945 /* TODO: Other registers are not yet used */
948 static const struct i915_range gen11_shadowed_regs[] = {
949 { .start = 0x2030, .end = 0x2030 },
950 { .start = 0x2550, .end = 0x2550 },
951 { .start = 0xA008, .end = 0xA00C },
952 { .start = 0x22030, .end = 0x22030 },
953 { .start = 0x22230, .end = 0x22230 },
954 { .start = 0x22510, .end = 0x22550 },
955 { .start = 0x1C0030, .end = 0x1C0030 },
956 { .start = 0x1C0230, .end = 0x1C0230 },
957 { .start = 0x1C0510, .end = 0x1C0550 },
958 { .start = 0x1C4030, .end = 0x1C4030 },
959 { .start = 0x1C4230, .end = 0x1C4230 },
960 { .start = 0x1C4510, .end = 0x1C4550 },
961 { .start = 0x1C8030, .end = 0x1C8030 },
962 { .start = 0x1C8230, .end = 0x1C8230 },
963 { .start = 0x1C8510, .end = 0x1C8550 },
964 { .start = 0x1D0030, .end = 0x1D0030 },
965 { .start = 0x1D0230, .end = 0x1D0230 },
966 { .start = 0x1D0510, .end = 0x1D0550 },
967 { .start = 0x1D4030, .end = 0x1D4030 },
968 { .start = 0x1D4230, .end = 0x1D4230 },
969 { .start = 0x1D4510, .end = 0x1D4550 },
970 { .start = 0x1D8030, .end = 0x1D8030 },
971 { .start = 0x1D8230, .end = 0x1D8230 },
972 { .start = 0x1D8510, .end = 0x1D8550 },
975 static const struct i915_range gen12_shadowed_regs[] = {
976 { .start = 0x2030, .end = 0x2030 },
977 { .start = 0x2510, .end = 0x2550 },
978 { .start = 0xA008, .end = 0xA00C },
979 { .start = 0xA188, .end = 0xA188 },
980 { .start = 0xA278, .end = 0xA278 },
981 { .start = 0xA540, .end = 0xA56C },
982 { .start = 0xC4C8, .end = 0xC4C8 },
983 { .start = 0xC4D4, .end = 0xC4D4 },
984 { .start = 0xC600, .end = 0xC600 },
985 { .start = 0x22030, .end = 0x22030 },
986 { .start = 0x22510, .end = 0x22550 },
987 { .start = 0x1C0030, .end = 0x1C0030 },
988 { .start = 0x1C0510, .end = 0x1C0550 },
989 { .start = 0x1C4030, .end = 0x1C4030 },
990 { .start = 0x1C4510, .end = 0x1C4550 },
991 { .start = 0x1C8030, .end = 0x1C8030 },
992 { .start = 0x1C8510, .end = 0x1C8550 },
993 { .start = 0x1D0030, .end = 0x1D0030 },
994 { .start = 0x1D0510, .end = 0x1D0550 },
995 { .start = 0x1D4030, .end = 0x1D4030 },
996 { .start = 0x1D4510, .end = 0x1D4550 },
997 { .start = 0x1D8030, .end = 0x1D8030 },
998 { .start = 0x1D8510, .end = 0x1D8550 },
1001 * The rest of these ranges are specific to Xe_HP and beyond, but
1002 * are reserved/unused ranges on earlier gen12 platforms, so they can
1003 * be safely added to the gen12 table.
1005 { .start = 0x1E0030, .end = 0x1E0030 },
1006 { .start = 0x1E0510, .end = 0x1E0550 },
1007 { .start = 0x1E4030, .end = 0x1E4030 },
1008 { .start = 0x1E4510, .end = 0x1E4550 },
1009 { .start = 0x1E8030, .end = 0x1E8030 },
1010 { .start = 0x1E8510, .end = 0x1E8550 },
1011 { .start = 0x1F0030, .end = 0x1F0030 },
1012 { .start = 0x1F0510, .end = 0x1F0550 },
1013 { .start = 0x1F4030, .end = 0x1F4030 },
1014 { .start = 0x1F4510, .end = 0x1F4550 },
1015 { .start = 0x1F8030, .end = 0x1F8030 },
1016 { .start = 0x1F8510, .end = 0x1F8550 },
1019 static const struct i915_range dg2_shadowed_regs[] = {
1020 { .start = 0x2030, .end = 0x2030 },
1021 { .start = 0x2510, .end = 0x2550 },
1022 { .start = 0xA008, .end = 0xA00C },
1023 { .start = 0xA188, .end = 0xA188 },
1024 { .start = 0xA278, .end = 0xA278 },
1025 { .start = 0xA540, .end = 0xA56C },
1026 { .start = 0xC4C8, .end = 0xC4C8 },
1027 { .start = 0xC4E0, .end = 0xC4E0 },
1028 { .start = 0xC600, .end = 0xC600 },
1029 { .start = 0xC658, .end = 0xC658 },
1030 { .start = 0x22030, .end = 0x22030 },
1031 { .start = 0x22510, .end = 0x22550 },
1032 { .start = 0x1C0030, .end = 0x1C0030 },
1033 { .start = 0x1C0510, .end = 0x1C0550 },
1034 { .start = 0x1C4030, .end = 0x1C4030 },
1035 { .start = 0x1C4510, .end = 0x1C4550 },
1036 { .start = 0x1C8030, .end = 0x1C8030 },
1037 { .start = 0x1C8510, .end = 0x1C8550 },
1038 { .start = 0x1D0030, .end = 0x1D0030 },
1039 { .start = 0x1D0510, .end = 0x1D0550 },
1040 { .start = 0x1D4030, .end = 0x1D4030 },
1041 { .start = 0x1D4510, .end = 0x1D4550 },
1042 { .start = 0x1D8030, .end = 0x1D8030 },
1043 { .start = 0x1D8510, .end = 0x1D8550 },
1044 { .start = 0x1E0030, .end = 0x1E0030 },
1045 { .start = 0x1E0510, .end = 0x1E0550 },
1046 { .start = 0x1E4030, .end = 0x1E4030 },
1047 { .start = 0x1E4510, .end = 0x1E4550 },
1048 { .start = 0x1E8030, .end = 0x1E8030 },
1049 { .start = 0x1E8510, .end = 0x1E8550 },
1050 { .start = 0x1F0030, .end = 0x1F0030 },
1051 { .start = 0x1F0510, .end = 0x1F0550 },
1052 { .start = 0x1F4030, .end = 0x1F4030 },
1053 { .start = 0x1F4510, .end = 0x1F4550 },
1054 { .start = 0x1F8030, .end = 0x1F8030 },
1055 { .start = 0x1F8510, .end = 0x1F8550 },
1058 static int mmio_range_cmp(u32 key, const struct i915_range *range)
1060 if (key < range->start)
1062 else if (key > range->end)
1068 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1070 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1073 return BSEARCH(offset,
1074 uncore->shadowed_reg_table,
1075 uncore->shadowed_reg_table_entries,
1079 static enum forcewake_domains
1080 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1082 return FORCEWAKE_RENDER;
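/* On gen6/7 every register that needs forcewake sits behind the single
 * render well, so writes never require a table lookup. */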
1085 static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1086 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1089 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1090 static const struct intel_forcewake_range __chv_fw_ranges[] = {
1091 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1092 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1093 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1094 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1095 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1096 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1097 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1098 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1099 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1100 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1101 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1102 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1103 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1104 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1105 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1106 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1109 #define __fwtable_reg_write_fw_domains(uncore, offset) \
1111 enum forcewake_domains __fwd = 0; \
1112 const u32 __offset = (offset); \
1113 if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1114 __fwd = find_fw_domain(uncore, __offset); \
1118 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1119 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1120 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1121 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1122 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1123 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1124 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1125 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1126 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1127 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1128 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1129 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1130 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1131 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1132 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1133 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1134 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1135 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1136 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1137 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1138 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1139 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1140 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1141 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1142 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1143 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1144 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1145 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1146 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1147 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1148 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1149 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1150 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1151 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1154 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1155 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1156 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1157 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1158 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1159 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1160 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1161 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1162 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1163 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1164 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1165 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1166 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1167 GEN_FW_RANGE(0x8800, 0x8bff, 0),
1168 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1169 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1170 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1171 GEN_FW_RANGE(0x9560, 0x95ff, 0),
1172 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1173 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1174 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1175 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1176 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1177 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1178 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1179 GEN_FW_RANGE(0x24000, 0x2407f, 0),
1180 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1181 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1182 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1183 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1184 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1185 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1186 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1187 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1188 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1189 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1190 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1194 * *Must* be sorted by offset ranges! See intel_fw_table_check().
1196 * Note that the spec lists several reserved/unused ranges that don't
1197 * actually contain any registers. In the table below we'll combine those
1198 * reserved ranges with either the preceding or following range to keep the
1199 * table small and lookups fast.
1201 static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1202 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1203 0x0 - 0xaff: reserved
1204 0xb00 - 0x1fff: always on */
1205 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1206 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1207 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1208 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1209 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1210 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1212 0x4900 - 0x51ff: reserved */
1213 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1214 0x5200 - 0x53ff: render
1215 0x5400 - 0x54ff: reserved
1216 0x5500 - 0x7fff: render */
1217 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1218 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1219 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1220 0x8160 - 0x817f: reserved
1221 0x8180 - 0x81ff: always on */
1222 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1223 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1224 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1226 0x8800 - 0x8fff: reserved
1228 0x9480 - 0x94cf: reserved */
1229 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1230 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1231 0x9560 - 0x95ff: always on
1232 0x9600 - 0x97ff: reserved */
1233 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1234 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1235 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1237 0xb480 - 0xbfff: reserved
1238 0xc000 - 0xcfff: gt */
1239 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1240 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1241 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1242 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1243 0xdc00 - 0xddff: render
1244 0xde00 - 0xde7f: reserved
1245 0xde80 - 0xe8ff: render
1246 0xe900 - 0xefff: reserved */
1247 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1249 0x10000 - 0x147ff: reserved */
1250 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1251 0x14800 - 0x14fff: render
1252 0x15000 - 0x16dff: reserved
1253 0x16e00 - 0x1bfff: render
1254 0x1c000 - 0x1ffff: reserved */
1255 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1256 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1257 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1258 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1259 0x24000 - 0x2407f: always on
1260 0x24080 - 0x2417f: reserved */
1261 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1262 0x24180 - 0x241ff: gt
1263 0x24200 - 0x249ff: reserved */
1264 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1265 0x24a00 - 0x24a7f: render
1266 0x24a80 - 0x251ff: reserved */
1267 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1268 0x25200 - 0x252ff: gt
1269 0x25300 - 0x255ff: reserved */
1270 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1271 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1272 0x25680 - 0x256ff: VD2
1273 0x25700 - 0x259ff: reserved */
1274 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1275 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1276 0x25a80 - 0x25aff: VD2
1277 0x25b00 - 0x2ffff: reserved */
1278 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1279 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1280 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1281 0x1c0000 - 0x1c2bff: VD0
1282 0x1c2c00 - 0x1c2cff: reserved
1283 0x1c2d00 - 0x1c2dff: VD0
1284 0x1c2e00 - 0x1c3eff: reserved
1285 0x1c3f00 - 0x1c3fff: VD0 */
1286 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1287 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1288 0x1c8000 - 0x1ca0ff: VE0
1289 0x1ca100 - 0x1cbeff: reserved
1290 0x1cbf00 - 0x1cbfff: VE0 */
1291 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1292 0x1cc000 - 0x1ccfff: VD0
1293 0x1cd000 - 0x1cffff: reserved */
1294 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1295 0x1d0000 - 0x1d2bff: VD2
1296 0x1d2c00 - 0x1d2cff: reserved
1297 0x1d2d00 - 0x1d2dff: VD2
1298 0x1d2e00 - 0x1d3eff: reserved
1299 0x1d3f00 - 0x1d3fff: VD2 */
1303 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
1304 * switching it from the GT domain to the render domain.
1306 * *Must* be sorted by offset ranges! See intel_fw_table_check().
1308 #define XEHP_FWRANGES(FW_RANGE_D800) \
1309 GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
1310 0x0 - 0xaff: reserved \
1311 0xb00 - 0x1fff: always on */ \
1312 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
1313 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
1314 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
1315 0x4b00 - 0x4fff: reserved \
1316 0x5000 - 0x51ff: always on */ \
1317 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
1318 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
1319 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
1320 GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
1321 0x8160 - 0x817f: reserved \
1322 0x8180 - 0x81ff: always on */ \
1323 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
1324 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
1325 GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
1326 0x8500 - 0x87ff: gt \
1327 0x8800 - 0x8c7f: reserved \
1328 0x8c80 - 0x8cff: gt (DG2 only) */ \
1329 GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
1330 0x8d00 - 0x8dff: render (DG2 only) \
1331 0x8e00 - 0x8fff: reserved */ \
1332 GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
1333 0x9000 - 0x947f: gt \
1334 0x9480 - 0x94cf: reserved */ \
1335 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
1336 GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
1337 0x9560 - 0x95ff: always on \
1338 0x9600 - 0x967f: reserved */ \
1339 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
1340 0x9680 - 0x96ff: render (DG2 only) \
1341 0x9700 - 0x97ff: reserved */ \
1342 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
1343 0x9800 - 0xb4ff: gt \
1344 0xb500 - 0xbfff: reserved \
1345 0xc000 - 0xcfff: gt */ \
1346 GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
1347 GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
1348 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
1349 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
1350 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
1351 0xdd00 - 0xddff: gt \
1352 0xde00 - 0xde7f: reserved */ \
1353 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
1354 0xde80 - 0xdfff: render \
1355 0xe000 - 0xe0ff: reserved \
1356 0xe100 - 0xe8ff: render */ \
1357 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
1358 0xe900 - 0xe9ff: gt \
1359 0xea00 - 0xefff: reserved \
1360 0xf000 - 0xffff: gt */ \
1361 GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
1362 0x10000 - 0x11fff: reserved \
1363 0x12000 - 0x127ff: always on \
1364 0x12800 - 0x12fff: reserved */ \
1365 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
1366 GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1367 0x13200 - 0x133ff: VD2 (DG2 only) \
1368 0x13400 - 0x13fff: reserved */ \
1369 GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
1370 GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
1371 GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
1372 GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
1373 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
1374 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
1375 0x15000 - 0x15fff: gt (DG2 only) \
1376 0x16000 - 0x16dff: reserved */ \
1377 GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
1378 GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1379 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
1380 0x21000 - 0x21fff: reserved */ \
1381 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
1382 GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
1383 0x24000 - 0x2407f: always on \
1384 0x24080 - 0x2417f: reserved */ \
1385 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
1386 0x24180 - 0x241ff: gt \
1387 0x24200 - 0x249ff: reserved */ \
1388 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
1389 0x24a00 - 0x24a7f: render \
1390 0x24a80 - 0x251ff: reserved */ \
1391 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
1392 0x25200 - 0x252ff: gt \
1393 0x25300 - 0x25fff: reserved */ \
1394 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
1395 0x26000 - 0x27fff: render \
1396 0x28000 - 0x29fff: reserved \
1397 0x2a000 - 0x2ffff: undocumented */ \
1398 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
1399 GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
1400 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1401 0x1c0000 - 0x1c2bff: VD0 \
1402 0x1c2c00 - 0x1c2cff: reserved \
1403 0x1c2d00 - 0x1c2dff: VD0 \
1404 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
1405 0x1c3f00 - 0x1c3fff: VD0 */ \
1406 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
1407 0x1c4000 - 0x1c6bff: VD1 \
1408 0x1c6c00 - 0x1c6cff: reserved \
1409 0x1c6d00 - 0x1c6dff: VD1 \
1410 0x1c6e00 - 0x1c7fff: reserved */ \
1411 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
1412 0x1c8000 - 0x1ca0ff: VE0 \
1413 0x1ca100 - 0x1cbfff: reserved */ \
1414 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
1415 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
1416 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
1417 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
1418 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1419 0x1d0000 - 0x1d2bff: VD2 \
1420 0x1d2c00 - 0x1d2cff: reserved \
1421 0x1d2d00 - 0x1d2dff: VD2 \
1422 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
1423 0x1d3e00 - 0x1d3eff: reserved \
1424 0x1d3f00 - 0x1d3fff: VD2 */ \
1425 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
1426 0x1d4000 - 0x1d6bff: VD3 \
1427 0x1d6c00 - 0x1d6cff: reserved \
1428 0x1d6d00 - 0x1d6dff: VD3 \
1429 0x1d6e00 - 0x1d7fff: reserved */ \
1430 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
1431 0x1d8000 - 0x1da0ff: VE1 \
1432 0x1da100 - 0x1dffff: reserved */ \
1433 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
1434 0x1e0000 - 0x1e2bff: VD4 \
1435 0x1e2c00 - 0x1e2cff: reserved \
1436 0x1e2d00 - 0x1e2dff: VD4 \
1437 0x1e2e00 - 0x1e3eff: reserved \
1438 0x1e3f00 - 0x1e3fff: VD4 */ \
1439 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
1440 0x1e4000 - 0x1e6bff: VD5 \
1441 0x1e6c00 - 0x1e6cff: reserved \
1442 0x1e6d00 - 0x1e6dff: VD5 \
1443 0x1e6e00 - 0x1e7fff: reserved */ \
1444 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
1445 0x1e8000 - 0x1ea0ff: VE2 \
1446 0x1ea100 - 0x1effff: reserved */ \
1447 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
1448 0x1f0000 - 0x1f2bff: VD6 \
1449 0x1f2c00 - 0x1f2cff: reserved \
1450 0x1f2d00 - 0x1f2dff: VD6 \
1451 0x1f2e00 - 0x1f3eff: reserved \
1452 0x1f3f00 - 0x1f3fff: VD6 */ \
1453 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
1454 0x1f4000 - 0x1f6bff: VD7 \
1455 0x1f6c00 - 0x1f6cff: reserved \
1456 0x1f6d00 - 0x1f6dff: VD7 \
1457 0x1f6e00 - 0x1f7fff: reserved */ \
1458 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1460 static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1461 XEHP_FWRANGES(FORCEWAKE_GT)
1464 static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1465 XEHP_FWRANGES(FORCEWAKE_RENDER)
1469 ilk_dummy_write(struct intel_uncore *uncore)
1471 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1472 * the chip from rc6 before touching it for real. MI_MODE is masked,
1473 * hence harmless to write 0 into. */
1474 __raw_uncore_write32(uncore, MI_MODE, 0);
1478 __unclaimed_reg_debug(struct intel_uncore *uncore,
1479 const i915_reg_t reg,
1483 if (drm_WARN(&uncore->i915->drm,
1484 check_for_unclaimed_mmio(uncore) && !before,
1485 "Unclaimed %s register 0x%x\n",
1486 read ? "read from" : "write to",
1487 i915_mmio_reg_offset(reg)))
1488 /* Only report the first N failures */
1489 uncore->i915->params.mmio_debug--;
1493 unclaimed_reg_debug(struct intel_uncore *uncore,
1494 const i915_reg_t reg,
1498 if (likely(!uncore->i915->params.mmio_debug))
1501 /* interrupts are disabled and re-enabled around uncore->lock usage */
1502 lockdep_assert_held(&uncore->lock);
1505 spin_lock(&uncore->debug->lock);
1507 __unclaimed_reg_debug(uncore, reg, read, before);
1510 spin_unlock(&uncore->debug->lock);
1513 #define __vgpu_read(x) \
1515 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1516 u##x val = __raw_uncore_read##x(uncore, reg); \
1517 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1525 #define GEN2_READ_HEADER(x) \
1527 assert_rpm_wakelock_held(uncore->rpm);
1529 #define GEN2_READ_FOOTER \
1530 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1533 #define __gen2_read(x) \
1535 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1536 GEN2_READ_HEADER(x); \
1537 val = __raw_uncore_read##x(uncore, reg); \
1541 #define __gen5_read(x) \
1543 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1544 GEN2_READ_HEADER(x); \
1545 ilk_dummy_write(uncore); \
1546 val = __raw_uncore_read##x(uncore, reg); \
1562 #undef GEN2_READ_FOOTER
1563 #undef GEN2_READ_HEADER
1565 #define GEN6_READ_HEADER(x) \
1566 u32 offset = i915_mmio_reg_offset(reg); \
1567 unsigned long irqflags; \
1569 assert_rpm_wakelock_held(uncore->rpm); \
1570 spin_lock_irqsave(&uncore->lock, irqflags); \
1571 unclaimed_reg_debug(uncore, reg, true, true)
1573 #define GEN6_READ_FOOTER \
1574 unclaimed_reg_debug(uncore, reg, true, false); \
1575 spin_unlock_irqrestore(&uncore->lock, irqflags); \
1576 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1579 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1580 enum forcewake_domains fw_domains)
1582 struct intel_uncore_forcewake_domain *domain;
1585 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1587 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1588 fw_domain_arm_timer(domain);
1590 uncore->funcs.force_wake_get(uncore, fw_domains);
1593 static inline void __force_wake_auto(struct intel_uncore *uncore,
1594 enum forcewake_domains fw_domains)
1596 GEM_BUG_ON(!fw_domains);
1598 /* Turn on all requested but inactive supported forcewake domains. */
1599 fw_domains &= uncore->fw_domains;
1600 fw_domains &= ~uncore->fw_domains_active;
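/* Slow path: wake the remaining domains and arm their release timers. */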
1603 ___force_wake_auto(uncore, fw_domains);
1606 #define __gen_fwtable_read(x) \
1608 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1610 enum forcewake_domains fw_engine; \
1611 GEN6_READ_HEADER(x); \
1612 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1614 __force_wake_auto(uncore, fw_engine); \
1615 val = __raw_uncore_read##x(uncore, reg); \
1619 static enum forcewake_domains
1620 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1621 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1624 __gen_fwtable_read(8)
1625 __gen_fwtable_read(16)
1626 __gen_fwtable_read(32)
1627 __gen_fwtable_read(64)
1629 #undef __gen_fwtable_read
1630 #undef GEN6_READ_FOOTER
1631 #undef GEN6_READ_HEADER
1633 #define GEN2_WRITE_HEADER \
1634 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1635 assert_rpm_wakelock_held(uncore->rpm); \
1637 #define GEN2_WRITE_FOOTER
1639 #define __gen2_write(x) \
1641 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1642 GEN2_WRITE_HEADER; \
1643 __raw_uncore_write##x(uncore, reg, val); \
1644 GEN2_WRITE_FOOTER; \
1647 #define __gen5_write(x) \
1649 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1650 GEN2_WRITE_HEADER; \
1651 ilk_dummy_write(uncore); \
1652 __raw_uncore_write##x(uncore, reg, val); \
1653 GEN2_WRITE_FOOTER; \
1666 #undef GEN2_WRITE_FOOTER
1667 #undef GEN2_WRITE_HEADER
1669 #define GEN6_WRITE_HEADER \
1670 u32 offset = i915_mmio_reg_offset(reg); \
1671 unsigned long irqflags; \
1672 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1673 assert_rpm_wakelock_held(uncore->rpm); \
1674 spin_lock_irqsave(&uncore->lock, irqflags); \
1675 unclaimed_reg_debug(uncore, reg, false, true)
1677 #define GEN6_WRITE_FOOTER \
1678 unclaimed_reg_debug(uncore, reg, false, false); \
1679 spin_unlock_irqrestore(&uncore->lock, irqflags)
1681 #define __gen6_write(x) \
1683 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1684 GEN6_WRITE_HEADER; \
1685 if (NEEDS_FORCE_WAKE(offset)) \
1686 __gen6_gt_wait_for_fifo(uncore); \
1687 __raw_uncore_write##x(uncore, reg, val); \
1688 GEN6_WRITE_FOOTER; \
1694 #define __gen_fwtable_write(x) \
1696 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1697 enum forcewake_domains fw_engine; \
1698 GEN6_WRITE_HEADER; \
1699 fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
1701 __force_wake_auto(uncore, fw_engine); \
1702 __raw_uncore_write##x(uncore, reg, val); \
1703 GEN6_WRITE_FOOTER; \
1706 static enum forcewake_domains
1707 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1709 return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
1712 __gen_fwtable_write(8)
1713 __gen_fwtable_write(16)
1714 __gen_fwtable_write(32)
1716 #undef __gen_fwtable_write
1717 #undef GEN6_WRITE_FOOTER
1718 #undef GEN6_WRITE_HEADER
1720 #define __vgpu_write(x) \
1722 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1723 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1724 __raw_uncore_write##x(uncore, reg, val); \
1730 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
1732 (uncore)->funcs.mmio_writeb = x##_write8; \
1733 (uncore)->funcs.mmio_writew = x##_write16; \
1734 (uncore)->funcs.mmio_writel = x##_write32; \
1737 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
1739 (uncore)->funcs.mmio_readb = x##_read8; \
1740 (uncore)->funcs.mmio_readw = x##_read16; \
1741 (uncore)->funcs.mmio_readl = x##_read32; \
1742 (uncore)->funcs.mmio_readq = x##_read64; \
1745 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
1747 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
1748 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1751 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
1753 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
1754 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1757 static int __fw_domain_init(struct intel_uncore *uncore,
1758 enum forcewake_domain_id domain_id,
1762 struct intel_uncore_forcewake_domain *d;
1764 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1765 GEM_BUG_ON(uncore->fw_domain[domain_id]);
1767 if (i915_inject_probe_failure(uncore->i915))
1770 d = kzalloc(sizeof(*d), GFP_KERNEL);
1774 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
1775 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
1779 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1780 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
1784 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1785 BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
1786 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1787 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
1788 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
1789 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
1790 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
1791 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
1792 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
1793 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
1794 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
1795 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
1796 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
1797 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
1798 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
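/* The BUILD_BUG_ONs above guarantee that BIT(domain_id) yields the
 * domain's forcewake mask. */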
1800 d->mask = BIT(domain_id);
1802 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1803 d->timer.function = intel_uncore_fw_release_timer;
1805 uncore->fw_domains |= BIT(domain_id);
1809 uncore->fw_domain[domain_id] = d;
1814 static void fw_domain_fini(struct intel_uncore *uncore,
1815 enum forcewake_domain_id domain_id)
1817 struct intel_uncore_forcewake_domain *d;
1819 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1821 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1825 uncore->fw_domains &= ~BIT(domain_id);
1826 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
1827 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
1831 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
1833 struct intel_uncore_forcewake_domain *d;
1836 for_each_fw_domain(d, uncore, tmp)
1837 fw_domain_fini(uncore, d->id);
1840 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
1842 struct drm_i915_private *i915 = uncore->i915;
1845 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1847 #define fw_domain_init(uncore__, id__, set__, ack__) \
1848 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
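/* 'ret ?:' short-circuits: once one domain fails to init, later calls are skipped. */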
1850 if (GRAPHICS_VER(i915) >= 11) {
1851 /* we'll prune the domains of missing engines later */
1852 intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
1855 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1856 uncore->funcs.force_wake_put = fw_domains_put;
1857 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1858 FORCEWAKE_RENDER_GEN9,
1859 FORCEWAKE_ACK_RENDER_GEN9);
1860 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
1862 FORCEWAKE_ACK_GT_GEN9);
1864 for (i = 0; i < I915_MAX_VCS; i++) {
1865 if (!__HAS_ENGINE(emask, _VCS(i)))
1868 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
1869 FORCEWAKE_MEDIA_VDBOX_GEN11(i),
1870 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
1872 for (i = 0; i < I915_MAX_VECS; i++) {
1873 if (!__HAS_ENGINE(emask, _VECS(i)))
1876 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
1877 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
1878 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */
		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

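/*
 * Helpers to point the uncore at the static, per-platform forcewake-range
 * and shadowed-register tables chosen in uncore_forcewake_init().
 */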
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * Forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * doesn't clobber the GTT, which we want to map with ioremap_wc
	 * instead. Fortunately, the register BAR remains the same size for
	 * all the earlier generations up to Ironlake.
	 * For dgfx chips the register range is expanded to 4MB.
	 */
	if (GRAPHICS_VER(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
	uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

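	/*
	 * All forcewake platforms share the fwtable reader assigned above;
	 * which writer is used, and which range/shadow tables back it, is
	 * picked per platform below.
	 */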
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u8 min_graphics_ver;
	u8 max_graphics_ver;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.min_graphics_ver = 4,
	.max_graphics_ver = 12,
	.size = 8
} };

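/*
 * Whitelist entries are size-aligned, so the low bits of the user-supplied
 * offset double as flags: masking with -entry->size recovers the register
 * offset, and the remainder selects a read variant (e.g. I915_REG_READ_8B_WA
 * for the 2x32 read workaround). An illustrative userspace sketch (error
 * handling elided; 0x2358 is the render ring RING_TIMESTAMP offset):
 *
 *	struct drm_i915_reg_read rd = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd);
 *	// on success, rd.val holds the 64-bit timestamp
 */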
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, so it
 * is not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

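/*
 * Illustrative use, assuming the caller already holds the relevant
 * forcewake and may be in atomic context (so only the fast timeout is
 * used), e.g. polling a ring's MI_MODE until it reports idle:
 *
 *	err = __intel_wait_for_register_fw(uncore, RING_MI_MODE(base),
 *					   MODE_IDLE, MODE_IDLE,
 *					   500, 0, NULL);
 */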
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	int ret;
	u32 reg_value;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

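	/*
	 * The fast wait above ran with forcewake held under uncore->lock; if
	 * it timed out, fall back to the sleeping wait below, which does not
	 * hold forcewake for the duration and so will not keep the GT awake
	 * across a long poll.
	 */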
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

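/*
 * Multicast/replicated (MCR) registers answer from a hardware-chosen
 * instance unless the MCR selector is first steered to a specific
 * slice/subslice. The _fw variant below expects the caller to hold
 * uncore->lock and the required forcewake; it saves the selector, steers
 * it, reads the target register, then restores the original steering.
 */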
u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
					   i915_reg_t reg,
					   int slice, int subslice)
{
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;

	lockdep_assert_held(&uncore->lock);

	if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	return val;
}

u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
					i915_reg_t reg, int slice, int subslice)
{
	enum forcewake_domains fw_domains;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif