drivers/gpu/drm/i915/gt/intel_gt.c (linux-2.6-block.git, as of merge 'drm-intel-next-2022-08-29')
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include <drm/drm_managed.h>
7 #include <drm/intel-gtt.h>
8
9 #include "gem/i915_gem_internal.h"
10 #include "gem/i915_gem_lmem.h"
11 #include "pxp/intel_pxp.h"
12
13 #include "i915_drv.h"
14 #include "i915_perf_oa_regs.h"
15 #include "intel_context.h"
16 #include "intel_engine_pm.h"
17 #include "intel_engine_regs.h"
18 #include "intel_ggtt_gmch.h"
19 #include "intel_gt.h"
20 #include "intel_gt_buffer_pool.h"
21 #include "intel_gt_clock_utils.h"
22 #include "intel_gt_debugfs.h"
23 #include "intel_gt_mcr.h"
24 #include "intel_gt_pm.h"
25 #include "intel_gt_regs.h"
26 #include "intel_gt_requests.h"
27 #include "intel_migrate.h"
28 #include "intel_mocs.h"
29 #include "intel_pci_config.h"
30 #include "intel_pm.h"
31 #include "intel_rc6.h"
32 #include "intel_renderstate.h"
33 #include "intel_rps.h"
34 #include "intel_gt_sysfs.h"
35 #include "intel_uncore.h"
36 #include "shmem_utils.h"
37
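/*
 * Software-only construction of per-GT state: locks, the closed-vma list,
 * the watchdog worker, buffer pool, reset/request/timeline tracking, TLB
 * invalidation serialisation, and the early PM, uC and RPS state.
 */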
38 static void __intel_gt_init_early(struct intel_gt *gt)
39 {
40         spin_lock_init(&gt->irq_lock);
41
42         INIT_LIST_HEAD(&gt->closed_vma);
43         spin_lock_init(&gt->closed_lock);
44
45         init_llist_head(&gt->watchdog.list);
46         INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
47
48         intel_gt_init_buffer_pool(gt);
49         intel_gt_init_reset(gt);
50         intel_gt_init_requests(gt);
51         intel_gt_init_timelines(gt);
52         mutex_init(&gt->tlb.invalidate_lock);
53         seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
54         intel_gt_pm_init_early(gt);
55
56         intel_uc_init_early(&gt->uc);
57         intel_rps_init_early(&gt->rps);
58 }
59
60 /* Preliminary initialization of Tile 0 */
61 void intel_root_gt_init_early(struct drm_i915_private *i915)
62 {
63         struct intel_gt *gt = to_gt(i915);
64
65         gt->i915 = i915;
66         gt->uncore = &i915->uncore;
67
68         __intel_gt_init_early(gt);
69 }
70
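/*
 * Probe and register this GT's local memory (LMEM) region. -ENODEV from
 * intel_gt_setup_lmem() simply means there is no device-local memory to
 * expose and is treated as success.
 */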
71 static int intel_gt_probe_lmem(struct intel_gt *gt)
72 {
73         struct drm_i915_private *i915 = gt->i915;
74         unsigned int instance = gt->info.id;
75         int id = INTEL_REGION_LMEM_0 + instance;
76         struct intel_memory_region *mem;
77         int err;
78
79         mem = intel_gt_setup_lmem(gt);
80         if (IS_ERR(mem)) {
81                 err = PTR_ERR(mem);
82                 if (err == -ENODEV)
83                         return 0;
84
85                 drm_err(&i915->drm,
86                         "Failed to setup region(%d) type=%d\n",
87                         err, INTEL_MEMORY_LOCAL);
88                 return err;
89         }
90
91         mem->id = id;
92         mem->instance = instance;
93
94         intel_memory_region_set_name(mem, "local%u", mem->instance);
95
96         GEM_BUG_ON(!HAS_REGION(i915, id));
97         GEM_BUG_ON(i915->mm.regions[id]);
98         i915->mm.regions[id] = mem;
99
100         return 0;
101 }
102
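/*
 * Allocate the drm-managed struct i915_ggtt for this GT; the GGTT itself
 * is probed and initialised later during driver probe.
 */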
103 int intel_gt_assign_ggtt(struct intel_gt *gt)
104 {
105         gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
106
107         return gt->ggtt ? 0 : -ENOMEM;
108 }
109
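/*
 * First MMIO-based pass over the GT: derive clock frequencies, read the
 * uC and SSEU configuration, set up multicast register steering and probe
 * the available engines.
 */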
110 int intel_gt_init_mmio(struct intel_gt *gt)
111 {
112         intel_gt_init_clock_frequency(gt);
113
114         intel_uc_init_mmio(&gt->uc);
115         intel_sseu_info_init(gt);
116         intel_gt_mcr_init(gt);
117
118         return intel_engines_init_mmio(gt);
119 }
120
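/* Zero out the ring registers of a legacy ring the driver never uses. */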
121 static void init_unused_ring(struct intel_gt *gt, u32 base)
122 {
123         struct intel_uncore *uncore = gt->uncore;
124
125         intel_uncore_write(uncore, RING_CTL(base), 0);
126         intel_uncore_write(uncore, RING_HEAD(base), 0);
127         intel_uncore_write(uncore, RING_TAIL(base), 0);
128         intel_uncore_write(uncore, RING_START(base), 0);
129 }
130
131 static void init_unused_rings(struct intel_gt *gt)
132 {
133         struct drm_i915_private *i915 = gt->i915;
134
135         if (IS_I830(i915)) {
136                 init_unused_ring(gt, PRB1_BASE);
137                 init_unused_ring(gt, SRB0_BASE);
138                 init_unused_ring(gt, SRB1_BASE);
139                 init_unused_ring(gt, SRB2_BASE);
140                 init_unused_ring(gt, SRB3_BASE);
141         } else if (GRAPHICS_VER(i915) == 2) {
142                 init_unused_ring(gt, SRB0_BASE);
143                 init_unused_ring(gt, SRB1_BASE);
144         } else if (GRAPHICS_VER(i915) == 3) {
145                 init_unused_ring(gt, PRB1_BASE);
146                 init_unused_ring(gt, PRB2_BASE);
147         }
148 }
149
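/*
 * (Re)program the GT hardware: apply and verify workarounds, set up
 * swizzling, idle the unused legacy rings, enable PPGTT, load the uC
 * firmware and program the MOCS tables, all under a full forcewake
 * reference.
 */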
150 int intel_gt_init_hw(struct intel_gt *gt)
151 {
152         struct drm_i915_private *i915 = gt->i915;
153         struct intel_uncore *uncore = gt->uncore;
154         int ret;
155
156         gt->last_init_time = ktime_get();
157
158         /* Double layer security blanket, see i915_gem_init() */
159         intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
160
161         if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
162                 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
163
164         if (IS_HASWELL(i915))
165                 intel_uncore_write(uncore,
166                                    HSW_MI_PREDICATE_RESULT_2,
167                                    IS_HSW_GT3(i915) ?
168                                    LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
169
170         /* Apply the GT workarounds... */
171         intel_gt_apply_workarounds(gt);
172         /* ...and determine whether they are sticking. */
173         intel_gt_verify_workarounds(gt, "init");
174
175         intel_gt_init_swizzling(gt);
176
177         /*
178          * At least 830 can leave some of the unused rings
179          * "active" (i.e. head != tail) after resume, which
180          * will prevent C3 entry. Make sure all unused rings
181          * are totally idle.
182          */
183         init_unused_rings(gt);
184
185         ret = i915_ppgtt_init_hw(gt);
186         if (ret) {
187                 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
188                 goto out;
189         }
190
191         /* We can't enable contexts until all firmware is loaded */
192         ret = intel_uc_init_hw(&gt->uc);
193         if (ret) {
194                 i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
195                 goto out;
196         }
197
198         intel_mocs_init(gt);
199
200 out:
201         intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
202         return ret;
203 }
204
205 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
206 {
207         intel_uncore_rmw(uncore, reg, 0, set);
208 }
209
210 static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
211 {
212         intel_uncore_rmw(uncore, reg, clr, 0);
213 }
214
215 static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
216 {
217         intel_uncore_rmw(uncore, reg, 0, 0);
218 }
219
220 static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
221 {
222         GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
223         GEN6_RING_FAULT_REG_POSTING_READ(engine);
224 }
225
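/*
 * Clear latched error state (PGTBL_ER, IPEIR, EIR and the generation
 * specific fault registers) so stale faults are not reported against
 * future work; anything stuck in EIR is masked via EMR instead.
 */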
226 void
227 intel_gt_clear_error_registers(struct intel_gt *gt,
228                                intel_engine_mask_t engine_mask)
229 {
230         struct drm_i915_private *i915 = gt->i915;
231         struct intel_uncore *uncore = gt->uncore;
232         u32 eir;
233
234         if (GRAPHICS_VER(i915) != 2)
235                 clear_register(uncore, PGTBL_ER);
236
237         if (GRAPHICS_VER(i915) < 4)
238                 clear_register(uncore, IPEIR(RENDER_RING_BASE));
239         else
240                 clear_register(uncore, IPEIR_I965);
241
242         clear_register(uncore, EIR);
243         eir = intel_uncore_read(uncore, EIR);
244         if (eir) {
245                 /*
246                  * Some errors might have become stuck;
247                  * mask them.
248                  */
249                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
250                 rmw_set(uncore, EMR, eir);
251                 intel_uncore_write(uncore, GEN2_IIR,
252                                    I915_MASTER_ERROR_INTERRUPT);
253         }
254
255         if (GRAPHICS_VER(i915) >= 12) {
256                 rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
257                 intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
258         } else if (GRAPHICS_VER(i915) >= 8) {
259                 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
260                 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
261         } else if (GRAPHICS_VER(i915) >= 6) {
262                 struct intel_engine_cs *engine;
263                 enum intel_engine_id id;
264
265                 for_each_engine_masked(engine, gt, engine_mask, id)
266                         gen6_clear_engine_error_register(engine);
267         }
268 }
269
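/* Gen6-7: each engine has its own fault register; log any valid fault. */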
270 static void gen6_check_faults(struct intel_gt *gt)
271 {
272         struct intel_engine_cs *engine;
273         enum intel_engine_id id;
274         u32 fault;
275
276         for_each_engine(engine, gt, id) {
277                 fault = GEN6_RING_FAULT_REG_READ(engine);
278                 if (fault & RING_FAULT_VALID) {
279                         drm_dbg(&engine->i915->drm, "Unexpected fault\n"
280                                 "\tAddr: 0x%08lx\n"
281                                 "\tAddress space: %s\n"
282                                 "\tSource ID: %d\n"
283                                 "\tType: %d\n",
284                                 fault & PAGE_MASK,
285                                 fault & RING_FAULT_GTTSEL_MASK ?
286                                 "GGTT" : "PPGTT",
287                                 RING_FAULT_SRCID(fault),
288                                 RING_FAULT_FAULT_TYPE(fault));
289                 }
290         }
291 }
292
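/*
 * Gen8+: a single "all engine" fault register (plus the TLB fault data
 * registers) reports faults for the whole GT.
 */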
293 static void gen8_check_faults(struct intel_gt *gt)
294 {
295         struct intel_uncore *uncore = gt->uncore;
296         i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
297         u32 fault;
298
299         if (GRAPHICS_VER(gt->i915) >= 12) {
300                 fault_reg = GEN12_RING_FAULT_REG;
301                 fault_data0_reg = GEN12_FAULT_TLB_DATA0;
302                 fault_data1_reg = GEN12_FAULT_TLB_DATA1;
303         } else {
304                 fault_reg = GEN8_RING_FAULT_REG;
305                 fault_data0_reg = GEN8_FAULT_TLB_DATA0;
306                 fault_data1_reg = GEN8_FAULT_TLB_DATA1;
307         }
308
309         fault = intel_uncore_read(uncore, fault_reg);
310         if (fault & RING_FAULT_VALID) {
311                 u32 fault_data0, fault_data1;
312                 u64 fault_addr;
313
314                 fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
315                 fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
316
317                 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
318                              ((u64)fault_data0 << 12);
319
320                 drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
321                         "\tAddr: 0x%08x_%08x\n"
322                         "\tAddress space: %s\n"
323                         "\tEngine ID: %d\n"
324                         "\tSource ID: %d\n"
325                         "\tType: %d\n",
326                         upper_32_bits(fault_addr), lower_32_bits(fault_addr),
327                         fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
328                         GEN8_RING_FAULT_ENGINE_ID(fault),
329                         RING_FAULT_SRCID(fault),
330                         RING_FAULT_FAULT_TYPE(fault));
331         }
332 }
333
334 void intel_gt_check_and_clear_faults(struct intel_gt *gt)
335 {
336         struct drm_i915_private *i915 = gt->i915;
337
338         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
339         if (GRAPHICS_VER(i915) >= 8)
340                 gen8_check_faults(gt);
341         else if (GRAPHICS_VER(i915) >= 6)
342                 gen6_check_faults(gt);
343         else
344                 return;
345
346         intel_gt_clear_error_registers(gt, ALL_ENGINES);
347 }
348
349 void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
350 {
351         struct intel_uncore *uncore = gt->uncore;
352         intel_wakeref_t wakeref;
353
354         /*
355          * No actual flushing is required for the GTT write domain for reads
356          * from the GTT domain. Writes to it "immediately" go to main memory
357          * as far as we know, so there's no chipset flush. It also doesn't
358          * land in the GPU render cache.
359          *
360          * However, we do have to enforce the order so that all writes through
361          * the GTT land before any writes to the device, such as updates to
362          * the GATT itself.
363          *
364          * We also have to wait a bit for the writes to land from the GTT.
365          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
366          * timing. This issue has only been observed when switching quickly
367          * between GTT writes and CPU reads from inside the kernel on recent hw,
368          * and it appears to affect only discrete GTT blocks (i.e. on LLC
369          * system agents we could not reproduce this behaviour, at least
370          * not until Cannonlake).
371          */
372
373         wmb();
374
375         if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
376                 return;
377
378         intel_gt_chipset_flush(gt);
379
380         with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
381                 unsigned long flags;
382
383                 spin_lock_irqsave(&uncore->lock, flags);
384                 intel_uncore_posting_read_fw(uncore,
385                                              RING_HEAD(RENDER_RING_BASE));
386                 spin_unlock_irqrestore(&uncore->lock, flags);
387         }
388 }
389
390 void intel_gt_chipset_flush(struct intel_gt *gt)
391 {
392         wmb();
393         if (GRAPHICS_VER(gt->i915) < 6)
394                 intel_ggtt_gmch_flush();
395 }
396
397 void intel_gt_driver_register(struct intel_gt *gt)
398 {
399         intel_gsc_init(&gt->gsc, gt->i915);
400
401         intel_rps_driver_register(&gt->rps);
402
403         intel_gt_debugfs_register(gt);
404         intel_gt_sysfs_register(gt);
405 }
406
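/*
 * Allocate and pin the GT-global scratch page, trying LMEM first and
 * falling back to stolen and then internal memory.
 */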
407 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
408 {
409         struct drm_i915_private *i915 = gt->i915;
410         struct drm_i915_gem_object *obj;
411         struct i915_vma *vma;
412         int ret;
413
414         obj = i915_gem_object_create_lmem(i915, size,
415                                           I915_BO_ALLOC_VOLATILE |
416                                           I915_BO_ALLOC_GPU_ONLY);
417         if (IS_ERR(obj))
418                 obj = i915_gem_object_create_stolen(i915, size);
419         if (IS_ERR(obj))
420                 obj = i915_gem_object_create_internal(i915, size);
421         if (IS_ERR(obj)) {
422                 drm_err(&i915->drm, "Failed to allocate scratch page\n");
423                 return PTR_ERR(obj);
424         }
425
426         vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
427         if (IS_ERR(vma)) {
428                 ret = PTR_ERR(vma);
429                 goto err_unref;
430         }
431
432         ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
433         if (ret)
434                 goto err_unref;
435
436         gt->scratch = i915_vma_make_unshrinkable(vma);
437
438         return 0;
439
440 err_unref:
441         i915_gem_object_put(obj);
442         return ret;
443 }
444
445 static void intel_gt_fini_scratch(struct intel_gt *gt)
446 {
447         i915_vma_unpin_and_release(&gt->scratch, 0);
448 }
449
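/*
 * Pick the address space backing gt->vm: a full PPGTT where the hardware
 * supports one, otherwise fall back to the GGTT.
 */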
450 static struct i915_address_space *kernel_vm(struct intel_gt *gt)
451 {
452         if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
453                 return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
454         else
455                 return i915_vm_get(&gt->ggtt->vm);
456 }
457
458 static int __engines_record_defaults(struct intel_gt *gt)
459 {
460         struct i915_request *requests[I915_NUM_ENGINES] = {};
461         struct intel_engine_cs *engine;
462         enum intel_engine_id id;
463         int err = 0;
464
465         /*
466          * As we reset the GPU during very early sanitisation, the current
467          * register state on the GPU should reflect its default values.
468          * We load a context onto the hw (with restore-inhibit), then switch
469          * over to a second context to save that default register state. We
470          * can then prime every new context with that state so they all start
471          * from the same default HW values.
472          */
473
474         for_each_engine(engine, gt, id) {
475                 struct intel_renderstate so;
476                 struct intel_context *ce;
477                 struct i915_request *rq;
478
479                 /* We must be able to switch to something! */
480                 GEM_BUG_ON(!engine->kernel_context);
481
482                 ce = intel_context_create(engine);
483                 if (IS_ERR(ce)) {
484                         err = PTR_ERR(ce);
485                         goto out;
486                 }
487
488                 err = intel_renderstate_init(&so, ce);
489                 if (err)
490                         goto err;
491
492                 rq = i915_request_create(ce);
493                 if (IS_ERR(rq)) {
494                         err = PTR_ERR(rq);
495                         goto err_fini;
496                 }
497
498                 err = intel_engine_emit_ctx_wa(rq);
499                 if (err)
500                         goto err_rq;
501
502                 err = intel_renderstate_emit(&so, rq);
503                 if (err)
504                         goto err_rq;
505
506 err_rq:
507                 requests[id] = i915_request_get(rq);
508                 i915_request_add(rq);
509 err_fini:
510                 intel_renderstate_fini(&so, ce);
511 err:
512                 if (err) {
513                         intel_context_put(ce);
514                         goto out;
515                 }
516         }
517
518         /* Flush the default context image to memory, and enable powersaving. */
519         if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
520                 err = -EIO;
521                 goto out;
522         }
523
524         for (id = 0; id < ARRAY_SIZE(requests); id++) {
525                 struct i915_request *rq;
526                 struct file *state;
527
528                 rq = requests[id];
529                 if (!rq)
530                         continue;
531
532                 if (rq->fence.error) {
533                         err = -EIO;
534                         goto out;
535                 }
536
537                 GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
538                 if (!rq->context->state)
539                         continue;
540
541                 /* Keep a copy of the state's backing pages; free the obj */
542                 state = shmem_create_from_object(rq->context->state->obj);
543                 if (IS_ERR(state)) {
544                         err = PTR_ERR(state);
545                         goto out;
546                 }
547                 rq->engine->default_state = state;
548         }
549
550 out:
551         /*
552          * If we have to abandon now, we expect the engines to be idle
553          * and ready to be torn down. The quickest way we can accomplish
554          * this is by declaring ourselves wedged.
555          */
556         if (err)
557                 intel_gt_set_wedged(gt);
558
559         for (id = 0; id < ARRAY_SIZE(requests); id++) {
560                 struct intel_context *ce;
561                 struct i915_request *rq;
562
563                 rq = requests[id];
564                 if (!rq)
565                         continue;
566
567                 ce = rq->context;
568                 i915_request_put(rq);
569                 intel_context_put(ce);
570         }
571         return err;
572 }
573
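/*
 * Debug-only (CONFIG_DRM_I915_DEBUG_GEM) re-check that the per-engine
 * workarounds survived the initial requests recorded above.
 */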
574 static int __engines_verify_workarounds(struct intel_gt *gt)
575 {
576         struct intel_engine_cs *engine;
577         enum intel_engine_id id;
578         int err = 0;
579
580         if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
581                 return 0;
582
583         for_each_engine(engine, gt, id) {
584                 if (intel_engine_verify_workarounds(engine, "load"))
585                         err = -EIO;
586         }
587
588         /* Flush and restore the kernel context for safety */
589         if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
590                 err = -EIO;
591
592         return err;
593 }
594
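/*
 * Quiesce the GT for teardown: wedge on fini and run the suspend sequence
 * so that the GT is parked (no PM wakeref held) afterwards.
 */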
595 static void __intel_gt_disable(struct intel_gt *gt)
596 {
597         intel_gt_set_wedged_on_fini(gt);
598
599         intel_gt_suspend_prepare(gt);
600         intel_gt_suspend_late(gt);
601
602         GEM_BUG_ON(intel_gt_pm_is_awake(gt));
603 }
604
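/*
 * Retire outstanding requests and wait for the GT, including the uC, to
 * become idle. A GT that is already asleep trivially counts as idle; a
 * pending signal aborts the wait with -EINTR.
 */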
605 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
606 {
607         long remaining_timeout;
608
609         /* If the device is asleep, we have no requests outstanding */
610         if (!intel_gt_pm_is_awake(gt))
611                 return 0;
612
613         while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
614                                                            &remaining_timeout)) > 0) {
615                 cond_resched();
616                 if (signal_pending(current))
617                         return -EINTR;
618         }
619
620         return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
621                                                           remaining_timeout);
622 }
623
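/*
 * One-time GT initialisation: workarounds, scratch, PM, the kernel
 * address space, engines, uC, the recorded default context images,
 * migration contexts and PXP. Any failure wedges the GT on init.
 */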
624 int intel_gt_init(struct intel_gt *gt)
625 {
626         int err;
627
628         err = i915_inject_probe_error(gt->i915, -ENODEV);
629         if (err)
630                 return err;
631
632         intel_gt_init_workarounds(gt);
633
634         /*
635          * This is just a security blanket to placate dragons.
636          * On some systems, we very sporadically observe that the first TLBs
637          * used by the CS may be stale, despite us poking the TLB reset. If
638          * we hold the forcewake during initialisation these problems
639          * just magically go away.
640          */
641         intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
642
643         err = intel_gt_init_scratch(gt,
644                                     GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
645         if (err)
646                 goto out_fw;
647
648         intel_gt_pm_init(gt);
649
650         gt->vm = kernel_vm(gt);
651         if (!gt->vm) {
652                 err = -ENOMEM;
653                 goto err_pm;
654         }
655
656         intel_set_mocs_index(gt);
657
658         err = intel_engines_init(gt);
659         if (err)
660                 goto err_engines;
661
662         err = intel_uc_init(&gt->uc);
663         if (err)
664                 goto err_engines;
665
666         err = intel_gt_resume(gt);
667         if (err)
668                 goto err_uc_init;
669
670         err = intel_gt_init_hwconfig(gt);
671         if (err)
672                 drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
673                         ERR_PTR(err));
674
675         err = __engines_record_defaults(gt);
676         if (err)
677                 goto err_gt;
678
679         err = __engines_verify_workarounds(gt);
680         if (err)
681                 goto err_gt;
682
683         intel_uc_init_late(&gt->uc);
684
685         err = i915_inject_probe_error(gt->i915, -EIO);
686         if (err)
687                 goto err_gt;
688
689         intel_migrate_init(&gt->migrate, gt);
690
691         intel_pxp_init(&gt->pxp);
692
693         goto out_fw;
694 err_gt:
695         __intel_gt_disable(gt);
696         intel_uc_fini_hw(&gt->uc);
697 err_uc_init:
698         intel_uc_fini(&gt->uc);
699 err_engines:
700         intel_engines_release(gt);
701         i915_vm_put(fetch_and_zero(&gt->vm));
702 err_pm:
703         intel_gt_pm_fini(gt);
704         intel_gt_fini_scratch(gt);
705 out_fw:
706         if (err)
707                 intel_gt_set_wedged_on_init(gt);
708         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
709         return err;
710 }
711
712 void intel_gt_driver_remove(struct intel_gt *gt)
713 {
714         __intel_gt_disable(gt);
715
716         intel_migrate_fini(&gt->migrate);
717         intel_uc_driver_remove(&gt->uc);
718
719         intel_engines_release(gt);
720
721         intel_gt_flush_buffer_pool(gt);
722 }
723
724 void intel_gt_driver_unregister(struct intel_gt *gt)
725 {
726         intel_wakeref_t wakeref;
727
728         intel_gt_sysfs_unregister(gt);
729         intel_rps_driver_unregister(&gt->rps);
730         intel_gsc_fini(&gt->gsc);
731
732         intel_pxp_fini(&gt->pxp);
733
734         /*
735          * Upon unregistering the device (to prevent any new users), cancel
736          * all in-flight requests so that we can quickly unbind the active
737          * resources.
738          */
739         intel_gt_set_wedged_on_fini(gt);
740
741         /* Scrub all HW state upon release */
742         with_intel_runtime_pm(gt->uncore->rpm, wakeref)
743                 __intel_gt_reset(gt, ALL_ENGINES);
744 }
745
746 void intel_gt_driver_release(struct intel_gt *gt)
747 {
748         struct i915_address_space *vm;
749
750         vm = fetch_and_zero(&gt->vm);
751         if (vm) /* FIXME being called twice on error paths :( */
752                 i915_vm_put(vm);
753
754         intel_wa_list_free(&gt->wa_list);
755         intel_gt_pm_fini(gt);
756         intel_gt_fini_scratch(gt);
757         intel_gt_fini_buffer_pool(gt);
758         intel_gt_fini_hwconfig(gt);
759 }
760
761 void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
762 {
763         struct intel_gt *gt;
764         unsigned int id;
765
766         /* We need to wait for inflight RCU frees to release their grip */
767         rcu_barrier();
768
769         for_each_gt(gt, i915, id) {
770                 intel_uc_driver_late_release(&gt->uc);
771                 intel_gt_fini_requests(gt);
772                 intel_gt_fini_reset(gt);
773                 intel_gt_fini_timelines(gt);
774                 mutex_destroy(&gt->tlb.invalidate_lock);
775                 intel_engines_free(gt);
776         }
777 }
778
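/*
 * Per-tile setup: non-root tiles allocate their own uncore and MMIO debug
 * state (the root tile reuses i915's, set up in intel_root_gt_init_early()),
 * then the tile's MMIO range at @phys_addr is mapped.
 */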
779 static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
780 {
781         int ret;
782
783         if (!gt_is_root(gt)) {
784                 struct intel_uncore_mmio_debug *mmio_debug;
785                 struct intel_uncore *uncore;
786
787                 uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
788                 if (!uncore)
789                         return -ENOMEM;
790
791                 mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
792                 if (!mmio_debug) {
793                         kfree(uncore);
794                         return -ENOMEM;
795                 }
796
797                 gt->uncore = uncore;
798                 gt->uncore->debug = mmio_debug;
799
800                 __intel_gt_init_early(gt);
801         }
802
803         intel_uncore_init_early(gt->uncore, gt);
804
805         ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
806         if (ret)
807                 return ret;
808
809         gt->phys_addr = phys_addr;
810
811         return 0;
812 }
813
814 static void
815 intel_gt_tile_cleanup(struct intel_gt *gt)
816 {
817         intel_uncore_cleanup_mmio(gt->uncore);
818
819         if (!gt_is_root(gt)) {
820                 kfree(gt->uncore->debug);
821                 kfree(gt->uncore);
822                 kfree(gt);
823         }
824 }
825
826 int intel_gt_probe_all(struct drm_i915_private *i915)
827 {
828         struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
829         struct intel_gt *gt = &i915->gt0;
830         phys_addr_t phys_addr;
831         unsigned int mmio_bar;
832         int ret;
833
834         mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
835         phys_addr = pci_resource_start(pdev, mmio_bar);
836
837         /*
838          * We always have at least one primary GT on any device
839          * and it has already been initialized early during probe
840          * in i915_driver_probe().
841          */
842         ret = intel_gt_tile_setup(gt, phys_addr);
843         if (ret)
844                 return ret;
845
846         i915->gt[0] = gt;
847
848         /* TODO: add more tiles */
849         return 0;
850 }
851
852 int intel_gt_tiles_init(struct drm_i915_private *i915)
853 {
854         struct intel_gt *gt;
855         unsigned int id;
856         int ret;
857
858         for_each_gt(gt, i915, id) {
859                 ret = intel_gt_probe_lmem(gt);
860                 if (ret)
861                         return ret;
862         }
863
864         return 0;
865 }
866
867 void intel_gt_release_all(struct drm_i915_private *i915)
868 {
869         struct intel_gt *gt;
870         unsigned int id;
871
872         for_each_gt(gt, i915, id) {
873                 intel_gt_tile_cleanup(gt);
874                 i915->gt[id] = NULL;
875         }
876 }
877
878 void intel_gt_info_print(const struct intel_gt_info *info,
879                          struct drm_printer *p)
880 {
881         drm_printf(p, "available engines: %x\n", info->engine_mask);
882
883         intel_sseu_dump(&info->sseu, p);
884 }
885
886 struct reg_and_bit {
887         i915_reg_t reg;
888         u32 bit;
889 };
890
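/*
 * Look up the TLB invalidation register and request bit for @engine. With
 * the gen8 register set, video decode engines select the instance via a
 * separate register (GEN8_M2TCR) rather than a per-instance bit.
 */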
891 static struct reg_and_bit
892 get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
893                 const i915_reg_t *regs, const unsigned int num)
894 {
895         const unsigned int class = engine->class;
896         struct reg_and_bit rb = { };
897
898         if (drm_WARN_ON_ONCE(&engine->i915->drm,
899                              class >= num || !regs[class].reg))
900                 return rb;
901
902         rb.reg = regs[class];
903         if (gen8 && class == VIDEO_DECODE_CLASS)
904                 rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
905         else
906                 rb.bit = engine->instance;
907
908         rb.bit = BIT(rb.bit);
909
910         return rb;
911 }
912
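/*
 * Issue a full TLB invalidation on every awake engine by writing the
 * per-class invalidation registers, then poll each written register until
 * the hardware acknowledges by clearing the request bit.
 */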
913 static void mmio_invalidate_full(struct intel_gt *gt)
914 {
915         static const i915_reg_t gen8_regs[] = {
916                 [RENDER_CLASS]                  = GEN8_RTCR,
917                 [VIDEO_DECODE_CLASS]            = GEN8_M1TCR, /* , GEN8_M2TCR */
918                 [VIDEO_ENHANCEMENT_CLASS]       = GEN8_VTCR,
919                 [COPY_ENGINE_CLASS]             = GEN8_BTCR,
920         };
921         static const i915_reg_t gen12_regs[] = {
922                 [RENDER_CLASS]                  = GEN12_GFX_TLB_INV_CR,
923                 [VIDEO_DECODE_CLASS]            = GEN12_VD_TLB_INV_CR,
924                 [VIDEO_ENHANCEMENT_CLASS]       = GEN12_VE_TLB_INV_CR,
925                 [COPY_ENGINE_CLASS]             = GEN12_BLT_TLB_INV_CR,
926                 [COMPUTE_CLASS]                 = GEN12_COMPCTX_TLB_INV_CR,
927         };
928         struct drm_i915_private *i915 = gt->i915;
929         struct intel_uncore *uncore = gt->uncore;
930         struct intel_engine_cs *engine;
931         intel_engine_mask_t awake, tmp;
932         enum intel_engine_id id;
933         const i915_reg_t *regs;
934         unsigned int num = 0;
935
936         if (GRAPHICS_VER(i915) == 12) {
937                 regs = gen12_regs;
938                 num = ARRAY_SIZE(gen12_regs);
939         } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
940                 regs = gen8_regs;
941                 num = ARRAY_SIZE(gen8_regs);
942         } else if (GRAPHICS_VER(i915) < 8) {
943                 return;
944         }
945
946         if (drm_WARN_ONCE(&i915->drm, !num,
947                           "Platform does not implement TLB invalidation!"))
948                 return;
949
950         intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
951
952         spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
953
954         awake = 0;
955         for_each_engine(engine, gt, id) {
956                 struct reg_and_bit rb;
957
958                 if (!intel_engine_pm_is_awake(engine))
959                         continue;
960
961                 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
962                 if (!i915_mmio_reg_offset(rb.reg))
963                         continue;
964
965                 intel_uncore_write_fw(uncore, rb.reg, rb.bit);
966                 awake |= engine->mask;
967         }
968
969         GT_TRACE(gt, "invalidated engines %08x\n", awake);
970
971         /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
972         if (awake &&
973             (IS_TIGERLAKE(i915) ||
974              IS_DG1(i915) ||
975              IS_ROCKETLAKE(i915) ||
976              IS_ALDERLAKE_S(i915) ||
977              IS_ALDERLAKE_P(i915)))
978                 intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
979
980         spin_unlock_irq(&uncore->lock);
981
982         for_each_engine_masked(engine, gt, awake, tmp) {
983                 struct reg_and_bit rb;
984
985                 /*
986                  * The HW architecture suggests a typical invalidation time of
987                  * 40us, with pessimistic cases up to 100us, and recommends
988                  * capping at 1ms. We go a bit higher just in case.
989                  */
990                 const unsigned int timeout_us = 100;
991                 const unsigned int timeout_ms = 4;
992
993                 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
994                 if (__intel_wait_for_register_fw(uncore,
995                                                  rb.reg, rb.bit, 0,
996                                                  timeout_us, timeout_ms,
997                                                  NULL))
998                         drm_err_ratelimited(&gt->i915->drm,
999                                             "%s TLB invalidation did not complete in %ums!\n",
1000                                             engine->name, timeout_ms);
1001         }
1002
1003         /*
1004          * Use delayed put since a) we mostly expect a flurry of TLB
1005          * invalidations so it is good to avoid paying the forcewake cost and
1006          * b) it works around a bug in Icelake which cannot cope with too rapid
1007          * transitions.
1008          */
1009         intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
1010 }
1011
1012 static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
1013 {
1014         u32 cur = intel_gt_tlb_seqno(gt);
1015
1016         /* Only skip if a *full* TLB invalidate barrier has passed */
1017         return (s32)(cur - ALIGN(seqno, 2)) > 0;
1018 }
1019
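/*
 * Invalidate the GT TLBs on behalf of @seqno, unless a full invalidation
 * has already completed since that seqno was allocated; skipped entirely
 * if the GT is wedged or not awake. Callers typically latch a seqno from
 * intel_gt_next_invalidate_tlb_full() when zapping PTEs and pass it here.
 */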
1020 void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
1021 {
1022         intel_wakeref_t wakeref;
1023
1024         if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
1025                 return;
1026
1027         if (intel_gt_is_wedged(gt))
1028                 return;
1029
1030         if (tlb_seqno_passed(gt, seqno))
1031                 return;
1032
1033         with_intel_gt_pm_if_awake(gt, wakeref) {
1034                 mutex_lock(&gt->tlb.invalidate_lock);
1035                 if (tlb_seqno_passed(gt, seqno))
1036                         goto unlock;
1037
1038                 mmio_invalidate_full(gt);
1039
1040                 write_seqcount_invalidate(&gt->tlb.seqno);
1041 unlock:
1042                 mutex_unlock(&gt->tlb.invalidate_lock);
1043         }
1044 }