// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
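
/*
 * Early, tile-agnostic initialisation shared by the root GT and any
 * additional GTs: set up locks and lists, plus the early init of buffer
 * pools, reset, requests, timelines, PM, WOPCM, uC and RPS.
 */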
void intel_gt_common_init_early(struct intel_gt *gt)
{
	spin_lock_init(gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	mutex_init(&gt->tlb.invalidate_lock);
	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
	intel_gt_pm_init_early(gt);

	intel_wopcm_init_early(&gt->wopcm);
	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->i915 = i915;
	gt->uncore = &i915->uncore;
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	intel_gt_common_init_early(gt);

	return 0;
}
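
/*
 * Probe the local memory (LMEM) region backing this GT and register it in
 * i915->mm.regions[] under INTEL_REGION_LMEM_0 + the GT instance.
 */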
static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		drm_err(&i915->drm,
			"Failed to setup region(%d) type=%d\n",
			err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	/* Media GT shares primary GT's GGTT */
	if (gt->type == GT_MEDIA) {
		gt->ggtt = to_gt(gt->i915)->ggtt;
	} else {
		gt->ggtt = i915_ggtt_create(gt->i915);
		if (IS_ERR(gt->ggtt))
			return PTR_ERR(gt->ggtt);
	}

	list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);

	return 0;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}
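
/*
 * Quiesce legacy ring registers that are present but unused on the running
 * platform; see the note in intel_gt_init_hw() about stale rings preventing
 * C3 entry.
 */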
static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}
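
/*
 * One-time GT hardware bring-up: apply and verify workarounds, configure
 * swizzling, quiesce unused rings, enable PPGTT and load the uC firmware
 * before any contexts can be enabled.
 */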
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	return ret;
}
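
/* Clear a stale fault report from a gen6-7 engine's fault register. */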
static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
	if (GRAPHICS_VER(gt->i915) < 11)
		return INVALID_MMIO_REG;

	return gt->type == GT_MEDIA ?
	       MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		intel_uncore_write(uncore, PGTBL_ER, 0);

	if (GRAPHICS_VER(i915) < 4)
		intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
	else
		intel_uncore_write(uncore, IPEIR_I965, 0);

	intel_uncore_write(uncore, EIR, 0);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
		intel_uncore_rmw(uncore, EMR, 0, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
					   RING_FAULT_VALID, 0);
		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 12) {
		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void xehp_check_faults(struct intel_gt *gt)
{
	u32 fault;

	/*
	 * Although the fault register now lives in an MCR register range,
	 * the GAM registers are special and we only truly need to read
	 * the "primary" GAM instance rather than handling each instance
	 * individually. intel_gt_mcr_read_any() will automatically steer
	 * toward the primary instance.
	 */
	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
		fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&gt->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		xehp_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * at least).
	 */
	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}
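
/*
 * Allocate and pin a GGTT scratch page, preferring LMEM, then stolen, then
 * internal memory as a fallback.
 */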
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}
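
/*
 * Select the address space used for the kernel context: a full PPGTT where
 * the platform supports it, otherwise fall back to the global GTT.
 */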
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}

	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}
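
/*
 * Quiesce the GT for removal/teardown: wedge on fini and run the suspend
 * sequence so that no GT PM reference remains afterwards.
 */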
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}
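
/*
 * Retire outstanding requests and wait for the GT (and then the uC) to go
 * idle, bailing out early if a signal is pending.
 */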
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	if (timeout)
		return timeout;

	if (remaining_timeout < 0)
		remaining_timeout = 0;

	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
			ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_migrate_init(&gt->migrate, gt);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		mutex_destroy(&gt->tlb.invalidate_lock);
		intel_engines_free(gt);
	}
}
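
/*
 * Per-tile setup: non-root GTs get their own uncore and irq_lock allocations
 * plus the common early init, then the tile's MMIO range is mapped.
 */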
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

	if (!gt_is_root(gt)) {
		struct intel_uncore *uncore;
		spinlock_t *irq_lock;

		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
		if (!irq_lock)
			return -ENOMEM;

		gt->uncore = uncore;
		gt->irq_lock = irq_lock;

		intel_gt_common_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = &i915->gt0;
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;

	drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  pci_resource_len(pdev, mmio_bar)))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
			break;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	intel_gt_release_all(i915);

	return ret;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id)
		i915->gt[id] = NULL;
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}
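
/*
 * TLB invalidation is driven either through legacy i915_reg_t registers or,
 * on Xe_HP, through MCR registers; carry both forms together with the
 * per-engine bit to write and poll.
 */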
struct reg_and_bit {
	union {
		i915_reg_t reg;
		i915_mcr_reg_t mcr_reg;
	};
	u32 bit;
};

static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */

	rb.bit = engine->instance;
	rb.bit = BIT(rb.bit);

	return rb;
}

/*
 * HW architecture suggests typical invalidation time at 40us,
 * with pessimistic cases up to 100us and a recommendation to
 * cap at 1ms. We go a bit higher just in case.
 */
#define TLB_INVAL_TIMEOUT_US 100
#define TLB_INVAL_TIMEOUT_MS 4

/*
 * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
 * but are now considered MCR registers. Since they exist within a GAM range,
 * the primary instance of the register rolls up the status from each unit.
 */
static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
{
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
		return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
						 TLB_INVAL_TIMEOUT_US,
						 TLB_INVAL_TIMEOUT_MS);
	else
		return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
						    TLB_INVAL_TIMEOUT_US,
						    TLB_INVAL_TIMEOUT_MS,
						    NULL);
}
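
/*
 * Invalidate the TLBs of all currently awake engines using the per-class
 * invalidation registers of the running platform, then wait for each write
 * to be acknowledged.
 */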
static void mmio_invalidate_full(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
	};
	static const i915_mcr_reg_t xehp_regs[] = {
		[RENDER_CLASS]			= XEHP_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= XEHP_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= XEHP_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= XEHP_BLT_TLB_INV_CR,
		[COMPUTE_CLASS]			= XEHP_COMPCTX_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake, tmp;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;
	unsigned long flags;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		regs = NULL;
		num = ARRAY_SIZE(xehp_regs);
	} else if (GRAPHICS_VER(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (GRAPHICS_VER(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock); /* serialise invalidate with GT reset */

	awake = 0;
	for_each_engine(engine, gt, id) {
		struct reg_and_bit rb;

		if (!intel_engine_pm_is_awake(engine))
			continue;

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
			u32 val = BIT(engine->instance);

			if (engine->class == VIDEO_DECODE_CLASS ||
			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
			    engine->class == COMPUTE_CLASS)
				val = _MASKED_BIT_ENABLE(val);
			intel_gt_mcr_multicast_write_fw(gt,
							xehp_regs[engine->class],
							val);
		} else {
			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
			if (!i915_mmio_reg_offset(rb.reg))
				continue;

			if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
			    engine->class == COMPUTE_CLASS))
				rb.bit = _MASKED_BIT_ENABLE(rb.bit);

			intel_uncore_write_fw(uncore, rb.reg, rb.bit);
		}
		awake |= engine->mask;
	}

	GT_TRACE(gt, "invalidated engines %08x\n", awake);

	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
	if (awake &&
	    (IS_TIGERLAKE(i915) ||
	     IS_DG1(i915) ||
	     IS_ROCKETLAKE(i915) ||
	     IS_ALDERLAKE_S(i915) ||
	     IS_ALDERLAKE_P(i915)))
		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);

	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	for_each_engine_masked(engine, gt, awake, tmp) {
		struct reg_and_bit rb;

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
			rb.mcr_reg = xehp_regs[engine->class];
			rb.bit = BIT(engine->instance);
		} else {
			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		}

		if (wait_for_invalidate(gt, rb))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, TLB_INVAL_TIMEOUT_MS);
	}

	/*
	 * Use delayed put since a) we mostly expect a flurry of TLB
	 * invalidations so it is good to avoid paying the forcewake cost and
	 * b) it works around a bug in Icelake which cannot cope with too rapid
	 * transitions.
	 */
	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}
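
/*
 * Only treat @seqno as flushed once a full TLB invalidation barrier has been
 * issued after it; intel_gt_invalidate_tlb() below bumps gt->tlb.seqno on
 * every full invalidation.
 */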
static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
	u32 cur = intel_gt_tlb_seqno(gt);

	/* Only skip if a *full* TLB invalidate barrier has passed */
	return (s32)(cur - ALIGN(seqno, 2)) > 0;
}
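
/*
 * Perform a full GT TLB invalidation for @seqno unless one has already been
 * issued since that seqno was assigned; serialised against concurrent callers
 * and skipped when the GT is asleep or wedged.
 */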
void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
{
	intel_wakeref_t wakeref;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (intel_gt_is_wedged(gt))
		return;

	if (tlb_seqno_passed(gt, seqno))
		return;

	with_intel_gt_pm_if_awake(gt, wakeref) {
		mutex_lock(&gt->tlb.invalidate_lock);
		if (tlb_seqno_passed(gt, seqno))
			goto unlock;

		mmio_invalidate_full(gt);

		write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
		mutex_unlock(&gt->tlb.invalidate_lock);
	}
}