1 // SPDX-License-Identifier: MIT
3 * Copyright © 2022 Intel Corporation
8 #include <linux/minmax.h>
10 #include <drm/drm_managed.h>
11 #include <drm/xe_drm.h>
13 #include "instructions/xe_gfxpipe_commands.h"
14 #include "instructions/xe_mi_commands.h"
15 #include "regs/xe_gt_regs.h"
16 #include "xe_assert.h"
19 #include "xe_device.h"
20 #include "xe_exec_queue.h"
21 #include "xe_execlist.h"
22 #include "xe_force_wake.h"
25 #include "xe_gt_ccs_mode.h"
26 #include "xe_gt_clock.h"
27 #include "xe_gt_freq.h"
28 #include "xe_gt_idle.h"
29 #include "xe_gt_mcr.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_printk.h"
32 #include "xe_gt_sysfs.h"
33 #include "xe_gt_tlb_invalidation.h"
34 #include "xe_gt_topology.h"
35 #include "xe_guc_exec_queue_types.h"
36 #include "xe_guc_pc.h"
37 #include "xe_hw_fence.h"
38 #include "xe_hw_engine_class_sysfs.h"
43 #include "xe_migrate.h"
47 #include "xe_reg_sr.h"
48 #include "xe_ring_ops.h"
50 #include "xe_sched_job.h"
52 #include "xe_tuning.h"
58 struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
62 gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
64 return ERR_PTR(-ENOMEM);
67 gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
72 void xe_gt_sanitize(struct xe_gt *gt)
75 * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
78 gt->uc.guc.submission_state.enabled = false;
82 * xe_gt_remove() - Clean up the GT structures before driver removal
85 * This function should only act on objects/structures that must be cleaned
86 * before the driver removal callback is complete and therefore can't be
87 * deferred to a drmm action.
89 void xe_gt_remove(struct xe_gt *gt)
91 xe_uc_remove(>->uc);
94 static void gt_fini(struct drm_device *drm, void *arg)
96 struct xe_gt *gt = arg;
99 destroy_workqueue(gt->ordered_wq);
101 for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
102 xe_hw_fence_irq_finish(>->fence_irq[i]);
105 static void gt_reset_worker(struct work_struct *w);
107 static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
109 struct xe_sched_job *job;
111 struct dma_fence *fence;
114 bb = xe_bb_new(gt, 4, false);
118 job = xe_bb_create_job(q, bb);
120 xe_bb_free(bb, NULL);
124 xe_sched_job_arm(job);
125 fence = dma_fence_get(&job->drm.s_fence->finished);
126 xe_sched_job_push(job);
128 timeout = dma_fence_wait_timeout(fence, false, HZ);
129 dma_fence_put(fence);
130 xe_bb_free(bb, NULL);
140 * Convert back from encoded value to type-safe, only to be used when reg.mcr
143 static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
145 return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
/*
 * Build and submit a batch buffer that applies the engine's LRC save/restore
 * (workaround) register writes via MI_LOAD_REGISTER_IMM, then emits any
 * 3DSTATE instructions, so the values are captured into the context image.
 *
 * NOTE(review): this listing is a partial extraction — the embedded original
 * line numbers jump (148, 150, 151, 153, ...), so declarations (bb, idx,
 * count, val, err, timeout), the error-handling branches and the function's
 * tail are missing here. Compare against upstream xe_gt.c before relying on
 * this text.
 */
148 static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
150 struct xe_reg_sr *sr = &q->hwe->reg_lrc;
151 struct xe_reg_sr_entry *entry;
153 struct xe_sched_job *job;
155 struct dma_fence *fence;
/* Render gets a BB sized to the full LRC; others use a fixed large size */
159 if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
160 /* Big enough to emit all of the context's 3DSTATE */
161 bb = xe_bb_new(gt, xe_lrc_size(gt_to_xe(gt), q->hwe->class), false);
163 /* Just pick a large BB size */
164 bb = xe_bb_new(gt, SZ_4K, false);
/* First pass over the save/restore xarray — presumably counts entries
 * for MI_LRI_NUM_REGS below; the loop body is missing from this listing. */
169 xa_for_each(&sr->xa, idx, entry)
173 xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
175 bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
/* Second pass: emit one (addr, value) pair per save/restore entry */
177 xa_for_each(&sr->xa, idx, entry) {
178 struct xe_reg reg = entry->reg;
179 struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
183 * Skip reading the register if it's not really needed
/* Full-mask clear: value is just the masked-write form of set_bits;
 * otherwise read-modify-write (MCR-aware read for multicast registers). */
186 val = entry->clr_bits << 16;
187 else if (entry->clr_bits + 1)
189 xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
190 xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
194 val |= entry->set_bits;
196 bb->cs[bb->len++] = reg.addr;
197 bb->cs[bb->len++] = val;
198 xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
202 xe_lrc_emit_hwe_state_instructions(q, bb);
204 job = xe_bb_create_job(q, bb);
206 xe_bb_free(bb, NULL);
/* Submission and wait mirror emit_nop_job(): arm, take a fence ref,
 * push, wait up to HZ, then release fence and BB. */
210 xe_sched_job_arm(job);
211 fence = dma_fence_get(&job->drm.s_fence->finished);
212 xe_sched_job_push(job);
214 timeout = dma_fence_wait_timeout(fence, false, HZ);
215 dma_fence_put(fence);
216 xe_bb_free(bb, NULL);
/*
 * Record a "golden" (default) LRC image per engine class: set up the LRC
 * workarounds/tunings, run a WA job to prime the context, context-switch
 * away and back via NOP jobs so indirect workarounds take effect, then copy
 * the resulting context image into gt->default_lrc[class].
 *
 * NOTE(review): partial extraction — error checks between the calls, the
 * allocation failure path, several closing braces and the final return are
 * missing from this listing; verify against upstream before reuse.
 */
225 int xe_gt_record_default_lrcs(struct xe_gt *gt)
227 struct xe_device *xe = gt_to_xe(gt);
228 struct xe_hw_engine *hwe;
229 enum xe_hw_engine_id id;
232 for_each_hw_engine(hwe, gt, id) {
233 struct xe_exec_queue *q, *nop_q;
/* Only one golden LRC per class — skip engines whose class is done */
236 if (gt->default_lrc[hwe->class])
/* Apply LRC-level workarounds and tunings before recording */
239 xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
240 xe_wa_process_lrc(hwe);
241 xe_hw_engine_setup_default_lrc_state(hwe);
242 xe_tuning_process_lrc(hwe);
/* drm-managed buffer that will hold the recorded context image */
244 default_lrc = drmm_kzalloc(&xe->drm,
245 xe_lrc_size(xe, hwe->class),
250 q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
251 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
254 xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
259 /* Prime golden LRC with known good state */
260 err = emit_wa_job(gt, q);
262 xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
263 hwe->name, ERR_PTR(err), q->guc->id);
/* Second queue on the same engine, used purely to context-switch away */
267 nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
268 1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
270 err = PTR_ERR(nop_q);
271 xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
276 /* Switch to different LRC */
277 err = emit_nop_job(gt, nop_q);
279 xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
280 hwe->name, ERR_PTR(err), nop_q->guc->id);
284 /* Reload golden LRC to record the effect of any indirect W/A */
285 err = emit_nop_job(gt, q);
287 xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
288 hwe->name, ERR_PTR(err), q->guc->id);
/* Copy the hardware-written context image (past the PPHWSP) out */
292 xe_map_memcpy_from(xe, default_lrc,
294 xe_lrc_pphwsp_offset(&q->lrc[0]),
295 xe_lrc_size(xe, hwe->class));
297 gt->default_lrc[hwe->class] = default_lrc;
299 xe_exec_queue_put(nop_q);
301 xe_exec_queue_put(q);
/*
 * Early GT init: briefly grab/release GT forcewake, then initialize the
 * GT-level save/restore list and process GT/OOB workarounds and tunings.
 *
 * NOTE(review): partial extraction — the "if (err) return err;" guards
 * after each call and the final return are missing from this listing.
 */
309 int xe_gt_init_early(struct xe_gt *gt)
313 err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
317 err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
/* NOTE(review): ">->" below is HTML-entity damage for "&gt->" — confirm
 * against upstream (should read xe_reg_sr_init(&gt->reg_sr, ...)). */
321 xe_reg_sr_init(>->reg_sr, "GT", gt_to_xe(gt));
323 err = xe_wa_init(gt);
327 xe_wa_process_gt(gt);
328 xe_wa_process_oob(gt);
329 xe_tuning_process_gt(gt);
334 static void dump_pat_on_error(struct xe_gt *gt)
336 struct drm_printer p;
339 snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
340 p = drm_dbg_printer(>_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
/*
 * Initialization that only requires the GT forcewake domain: GGTT/LMTT for
 * primary GTs, GT-idle and CCS-mode sysfs, per-engine IRQs, early HW engine
 * setup, and stashing the GMD_ID version register.
 *
 * NOTE(review): partial extraction — error checks, several braces, the
 * success return and the error-unwind label structure are missing; the
 * ">->"/">_" tokens below are HTML-entity damage for "&gt->"/"&gt_" —
 * confirm against upstream xe_gt.c.
 */
345 static int gt_fw_domain_init(struct xe_gt *gt)
349 xe_device_mem_access_get(gt_to_xe(gt));
350 err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
352 goto err_hw_fence_irq;
/* GGTT and (for PF) LMTT exist per-tile, so only the primary GT does this */
354 if (!xe_gt_is_media_type(gt)) {
355 err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
358 if (IS_SRIOV_PF(gt_to_xe(gt)))
359 xe_lmtt_init(>_to_tile(gt)->sriov.pf.lmtt);
362 xe_gt_idle_sysfs_init(>->gtidle);
364 /* Enable per hw engine IRQs */
365 xe_irq_enable_hwe(gt);
367 /* Rerun MCR init as we now have hw engine list */
370 err = xe_hw_engines_init_early(gt);
/* sysfs registration failure is only warned about, not fatal */
374 err = xe_hw_engine_class_sysfs_init(gt);
376 drm_warn(>_to_xe(gt)->drm,
377 "failed to register engines sysfs directory, err: %d\n",
380 /* Initialize CCS mode sysfs after early initialization of HW engines */
381 xe_gt_ccs_mode_sysfs_init(gt);
384 * Stash hardware-reported version. Since this register does not exist
385 * on pre-MTL platforms, reading it there will (correctly) return 0.
387 gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
389 err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
391 xe_device_mem_access_put(gt_to_xe(gt));
/* Error unwind: dump PAT state, drop forcewake, finish fence IRQs */
396 dump_pat_on_error(gt);
397 xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
399 for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
400 xe_hw_fence_irq_finish(>->fence_irq[i]);
401 xe_device_mem_access_put(gt_to_xe(gt));
/*
 * Initialization that requires all forcewake domains: MCR defaults and
 * GT register save/restore, clocks, execlists, HW engines, the USM
 * suballocator pool and migrate context (primary GT only), uC post-hwconfig
 * and HW init, default CCS mode, and LMTT HW init for the SR-IOV PF.
 *
 * NOTE(review): partial extraction — error checks, goto labels, the success
 * return and some braces are missing; ">->"/">_" tokens are HTML-entity
 * damage for "&gt->"/"&gt_" — confirm against upstream xe_gt.c.
 */
406 static int all_fw_domain_init(struct xe_gt *gt)
410 xe_device_mem_access_get(gt_to_xe(gt));
411 err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
413 goto err_hw_fence_irq;
415 xe_gt_mcr_set_implicit_defaults(gt);
416 xe_reg_sr_apply_mmio(>->reg_sr, gt);
418 err = xe_gt_clock_init(gt);
423 err = xe_execlist_init(gt);
427 err = xe_hw_engines_init(gt);
431 if (!xe_gt_is_media_type(gt)) {
433 * USM has its only SA pool to non-block behind user operations
435 if (gt_to_xe(gt)->info.has_usm) {
436 struct xe_device *xe = gt_to_xe(gt);
/* Larger BB pool on discrete GPUs (1M) than integrated (512K) */
438 gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
439 IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
440 if (IS_ERR(gt->usm.bb_pool)) {
441 err = PTR_ERR(gt->usm.bb_pool);
/* Migrate context is per-tile, created from the primary GT */
447 if (!xe_gt_is_media_type(gt)) {
448 struct xe_tile *tile = gt_to_tile(gt);
450 tile->migrate = xe_migrate_init(tile);
451 if (IS_ERR(tile->migrate)) {
452 err = PTR_ERR(tile->migrate);
457 err = xe_uc_init_post_hwconfig(>->uc);
461 err = xe_uc_init_hw(>->uc);
465 /* Configure default CCS mode of 1 engine with all resources */
466 if (xe_gt_ccs_mode_enabled(gt)) {
468 xe_gt_apply_ccs_mode(gt);
471 if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
472 xe_lmtt_init_hw(>_to_tile(gt)->sriov.pf.lmtt);
474 err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
476 xe_device_mem_access_put(gt_to_xe(gt));
/* Error unwind: drop forcewake, finish fence IRQs, drop mem-access ref */
481 xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
483 for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
484 xe_hw_fence_irq_finish(>->fence_irq[i]);
485 xe_device_mem_access_put(gt_to_xe(gt));
/*
 * NOTE(review): partial extraction — error guards between the calls, the
 * goto-out label placement and the final return are missing; ">->" tokens
 * are HTML-entity damage for "&gt->" — confirm against upstream xe_gt.c.
 */
491 * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
492 * enable CTB communication.
494 int xe_gt_init_hwconfig(struct xe_gt *gt)
498 xe_device_mem_access_get(gt_to_xe(gt));
499 err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
503 xe_gt_topology_init(gt);
507 err = xe_uc_init(>->uc);
511 err = xe_uc_init_hwconfig(>->uc);
515 /* XXX: Fake that we pull the engine mask from hwconfig blob */
516 gt->info.engine_mask = gt->info.__engine_mask;
/* Cleanup path: release forcewake and the mem-access reference */
519 xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
521 xe_device_mem_access_put(gt_to_xe(gt));
/*
 * Main GT initialization: reset worker, per-class ring ops and fence IRQs,
 * TLB-invalidation and pagefault infrastructure, MOCS, sysfs, then the
 * forcewake-domain init stages, ending with a drmm cleanup action.
 *
 * NOTE(review): partial extraction — error guards after each call and the
 * final return are missing; ">->"/">_" tokens are HTML-entity damage for
 * "&gt->"/"&gt_" — confirm against upstream xe_gt.c.
 */
526 int xe_gt_init(struct xe_gt *gt)
531 INIT_WORK(>->reset.worker, gt_reset_worker);
533 for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
534 gt->ring_ops[i] = xe_ring_ops_get(gt, i);
535 xe_hw_fence_irq_init(>->fence_irq[i]);
538 err = xe_gt_tlb_invalidation_init(gt);
542 err = xe_gt_pagefault_init(gt);
546 xe_mocs_init_early(gt);
548 xe_gt_sysfs_init(gt);
550 err = gt_fw_domain_init(gt);
556 xe_force_wake_init_engines(gt, gt_to_fw(gt));
558 err = all_fw_domain_init(gt);
/* gt_fini() runs automatically on drm device teardown */
562 err = drmm_add_action_or_reset(>_to_xe(gt)->drm, gt_fini, gt);
569 static int do_gt_reset(struct xe_gt *gt)
573 xe_gsc_wa_14015076503(gt, true);
575 xe_mmio_write32(gt, GDRST, GRDOM_FULL);
576 err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
578 xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
581 xe_gsc_wa_14015076503(gt, false);
/*
 * Re-initialize GT hardware state after a reset (or resume): reapply MCR
 * defaults and register save/restore lists, WOPCM, ring enables, uC
 * sanitize/init/start, per-engine save/restore and whitelists, and CCS mode.
 *
 * NOTE(review): partial extraction — error guards after each call and the
 * final return are missing; ">->"/">_" tokens are HTML-entity damage for
 * "&gt->"/"&gt_" — confirm against upstream xe_gt.c.
 */
586 static int do_gt_restart(struct xe_gt *gt)
588 struct xe_hw_engine *hwe;
589 enum xe_hw_engine_id id;
594 xe_gt_mcr_set_implicit_defaults(gt);
595 xe_reg_sr_apply_mmio(>->reg_sr, gt);
597 err = xe_wopcm_init(>->uc.wopcm);
601 for_each_hw_engine(hwe, gt, id)
602 xe_hw_engine_enable_ring(hwe);
604 err = xe_uc_sanitize_reset(>->uc);
608 err = xe_uc_init_hw(>->uc);
612 if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
613 xe_lmtt_init_hw(>_to_tile(gt)->sriov.pf.lmtt);
616 err = xe_uc_start(>->uc);
/* Per-engine register state must be reapplied after the reset */
620 for_each_hw_engine(hwe, gt, id) {
621 xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
622 xe_reg_sr_apply_whitelist(hwe);
625 /* Get CCS mode in sync between sw/hw */
626 xe_gt_apply_ccs_mode(gt);
/*
 * Full GT reset sequence: stop the uC and pagefault handling, reset TLB
 * invalidation state, perform the hardware reset, then restart via
 * do_gt_restart(). Only supported with GuC submission.
 *
 * NOTE(review): partial extraction — error guards, goto labels, the success
 * return and the fail-path structure are missing; ">->" tokens are
 * HTML-entity damage for "&gt->" — confirm against upstream xe_gt.c.
 */
631 static int gt_reset(struct xe_gt *gt)
635 /* We only support GT resets with GuC submission */
636 if (!xe_device_uc_enabled(gt_to_xe(gt)))
639 xe_gt_info(gt, "reset started\n");
/* Fault-injection hook for testing the reset failure path */
641 if (xe_fault_inject_gt_reset()) {
648 xe_device_mem_access_get(gt_to_xe(gt));
649 err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
653 xe_uc_gucrc_disable(>->uc);
654 xe_uc_stop_prepare(>->uc);
655 xe_gt_pagefault_reset(gt);
657 err = xe_uc_stop(>->uc);
661 xe_gt_tlb_invalidation_reset(gt);
663 err = do_gt_reset(gt);
667 err = do_gt_restart(gt);
671 err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
672 xe_device_mem_access_put(gt_to_xe(gt));
675 xe_gt_info(gt, "reset done\n");
/* Failure path: best-effort forcewake release and uC restart, then flag
 * that an FLR is needed on driver fini. */
680 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
682 XE_WARN_ON(xe_uc_start(>->uc));
683 xe_device_mem_access_put(gt_to_xe(gt));
685 xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
687 gt_to_xe(gt)->needs_flr_on_fini = true;
692 static void gt_reset_worker(struct work_struct *w)
694 struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
699 void xe_gt_reset_async(struct xe_gt *gt)
701 xe_gt_info(gt, "trying reset\n");
703 /* Don't do a reset while one is already in flight */
704 if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(>->uc))
707 xe_gt_info(gt, "reset queued\n");
708 queue_work(gt->ordered_wq, >->reset.worker);
711 void xe_gt_suspend_prepare(struct xe_gt *gt)
713 xe_device_mem_access_get(gt_to_xe(gt));
714 XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
716 xe_uc_stop_prepare(>->uc);
718 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
719 xe_device_mem_access_put(gt_to_xe(gt));
722 int xe_gt_suspend(struct xe_gt *gt)
728 xe_device_mem_access_get(gt_to_xe(gt));
729 err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
733 err = xe_uc_suspend(>->uc);
737 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
738 xe_device_mem_access_put(gt_to_xe(gt));
739 xe_gt_info(gt, "suspended\n");
744 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
746 xe_device_mem_access_put(gt_to_xe(gt));
747 xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
752 int xe_gt_resume(struct xe_gt *gt)
756 xe_device_mem_access_get(gt_to_xe(gt));
757 err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
761 err = do_gt_restart(gt);
765 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
766 xe_device_mem_access_put(gt_to_xe(gt));
767 xe_gt_info(gt, "resumed\n");
772 XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
774 xe_device_mem_access_put(gt_to_xe(gt));
775 xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
780 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
781 enum xe_engine_class class,
782 u16 instance, bool logical)
784 struct xe_hw_engine *hwe;
785 enum xe_hw_engine_id id;
787 for_each_hw_engine(hwe, gt, id)
788 if (hwe->class == class &&
789 ((!logical && hwe->instance == instance) ||
790 (logical && hwe->logical_instance == instance)))
796 struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
797 enum xe_engine_class class)
799 struct xe_hw_engine *hwe;
800 enum xe_hw_engine_id id;
802 for_each_hw_engine(hwe, gt, id) {
804 case XE_ENGINE_CLASS_RENDER:
805 case XE_ENGINE_CLASS_COMPUTE:
806 if (hwe->class == XE_ENGINE_CLASS_RENDER ||
807 hwe->class == XE_ENGINE_CLASS_COMPUTE)
811 if (hwe->class == class)