// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

static void __intel_gt_init_early(struct intel_gt *gt)
{
	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	mutex_init(&gt->tlb.invalidate_lock);
	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
	intel_gt_pm_init_early(gt);

	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
void intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	__intel_gt_init_early(gt);
}

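/*
 * Probe this GT's device-local memory, if any. Note that intel_gt_setup_lmem()
 * returning -ENODEV is treated below as "no LMEM on this platform" rather
 * than as a probe failure.
 */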
static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		drm_err(&i915->drm,
			"Failed to setup region(%d) type=%d\n",
			err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

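/*
 * The GGTT descriptor is a drm-managed (drmm) allocation, so it is freed
 * automatically when the drm device is released; no explicit kfree() of
 * gt->ggtt is required.
 */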
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);

	return gt->ggtt ? 0 : -ENOMEM;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally stopped when we already suspended.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

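/*
 * Clear any stuck error state: the PGTBL/IPEIR/EIR master error registers and
 * the per-ring fault registers. Which registers exist (and where) varies by
 * generation, hence the GRAPHICS_VER() ladder below.
 */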
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		clear_register(uncore, PGTBL_ER);

	if (GRAPHICS_VER(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was removed entirely).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

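/*
 * Expose the GT to the rest of the driver and to userspace: initialise the
 * GSC, register the RPS interfaces, and create the debugfs and sysfs entries.
 */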
void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}

static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

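/*
 * Address space used by the kernel (default) context: a full PPGTT where the
 * platform supports more than aliasing PPGTT, otherwise a reference to the
 * global GTT is used instead.
 */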
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

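/*
 * Wait for outstanding requests to be retired and for the uC to go idle.
 * If the GT is already asleep there is nothing in flight and we return 0
 * immediately; a pending signal aborts the wait with -EINTR.
 */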
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
							  remaining_timeout);
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
			ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_migrate_init(&gt->migrate, gt);

	intel_pxp_init(&gt->pxp);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	intel_pxp_fini(&gt->pxp);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		mutex_destroy(&gt->tlb.invalidate_lock);
		intel_engines_free(gt);
	}
}

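/*
 * Per-tile setup. The root tile reuses the uncore embedded in struct
 * drm_i915_private (hooked up in intel_root_gt_init_early()), whereas any
 * additional tile allocates its own uncore and mmio debug state before its
 * MMIO range is mapped.
 */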
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

	if (!gt_is_root(gt)) {
		struct intel_uncore_mmio_debug *mmio_debug;
		struct intel_uncore *uncore;

		uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
		if (!mmio_debug) {
			kfree(uncore);
			return -ENOMEM;
		}

		gt->uncore = uncore;
		gt->uncore->debug = mmio_debug;

		__intel_gt_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

static void
intel_gt_tile_cleanup(struct intel_gt *gt)
{
	intel_uncore_cleanup_mmio(gt->uncore);

	if (!gt_is_root(gt)) {
		kfree(gt->uncore->debug);
		kfree(gt->uncore);
		kfree(gt);
	}
}

int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = &i915->gt0;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	int ret;

	mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	/* TODO: add more tiles */
	return 0;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id) {
		intel_gt_tile_cleanup(gt);
		i915->gt[id] = NULL;
	}
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

struct reg_and_bit {
	i915_reg_t reg;
	u32 bit;
};

static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
	rb.bit = BIT(engine->instance);

	return rb;
}

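/*
 * Invalidate the TLBs of all awake engines via MMIO. Gen8..11 expose one TLB
 * control register per engine class (the second video engine reaches
 * GEN8_M2TCR by offsetting from GEN8_M1TCR), while Gen12 uses the per-class
 * *_TLB_INV_CR registers. An invalidation is requested by writing the
 * engine's bit and the code below then polls for the hardware to clear it.
 */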
static void mmio_invalidate_full(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake, tmp;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;

	if (GRAPHICS_VER(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (GRAPHICS_VER(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */

	awake = 0;
	for_each_engine(engine, gt, id) {
		struct reg_and_bit rb;

		if (!intel_engine_pm_is_awake(engine))
			continue;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
		awake |= engine->mask;
	}

	GT_TRACE(gt, "invalidated engines %08x\n", awake);

	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
	if (awake &&
	    (IS_TIGERLAKE(i915) ||
	     IS_DG1(i915) ||
	     IS_ROCKETLAKE(i915) ||
	     IS_ALDERLAKE_S(i915) ||
	     IS_ALDERLAKE_P(i915)))
		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);

	spin_unlock_irq(&uncore->lock);

	for_each_engine_masked(engine, gt, awake, tmp) {
		struct reg_and_bit rb;

		/*
		 * HW architecture suggest typical invalidation time at 40us,
		 * with pessimistic cases up to 100us and a recommendation to
		 * cap at 1ms. We go a bit higher just in case.
		 */
		const unsigned int timeout_us = 100;
		const unsigned int timeout_ms = 4;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (__intel_wait_for_register_fw(uncore,
						 rb.reg, rb.bit, 0,
						 timeout_us, timeout_ms,
						 NULL))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, timeout_ms);
	}

	/*
	 * Use delayed put since a) we mostly expect a flurry of TLB
	 * invalidations so it is good to avoid paying the forcewake cost and
	 * b) it works around a bug in Icelake which cannot cope with too rapid
	 * transitions.
	 */
	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}

static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
	u32 cur = intel_gt_tlb_seqno(gt);

	/* Only skip if a *full* TLB invalidate barrier has passed */
	return (s32)(cur - ALIGN(seqno, 2)) > 0;
}

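/*
 * Full TLB invalidations are counted on gt->tlb.seqno. A caller that sampled
 * the count earlier may pass that value here; if a complete invalidation
 * barrier (the seqno rounded up to the next even value) has passed since the
 * sample, the MMIO invalidation can be skipped entirely.
 */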
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
{
	intel_wakeref_t wakeref;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (intel_gt_is_wedged(gt))
		return;

	if (tlb_seqno_passed(gt, seqno))
		return;

	with_intel_gt_pm_if_awake(gt, wakeref) {
		mutex_lock(&gt->tlb.invalidate_lock);
		if (tlb_seqno_passed(gt, seqno))
			goto unlock;

		mmio_invalidate_full(gt);

		write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
		mutex_unlock(&gt->tlb.invalidate_lock);
	}
}