drm/xe: Add dbg messages for LRC WAs
drivers/gpu/drm/xe/xe_gt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "regs/xe_gt_regs.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt_clock.h"
#include "xe_gt_idle_sysfs.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL the driver will
	 * not reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	destroy_workqueue(gt->ordered_wq);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

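/*
 * Submit an empty batch buffer on the given exec queue and wait up to a
 * second for it to complete.
 */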
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

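/*
 * Submit a batch buffer that writes the engine's LRC workaround registers
 * via an MI_LOAD_REGISTER_IMM sequence, logging each register/value pair,
 * then wait up to a second for the job to complete.
 */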
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	bb = xe_bb_new(gt, SZ_4K, false); /* Just pick a large BB size */
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, reg, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM(count);
		xa_for_each(&sr->xa, reg, entry) {
			bb->cs[bb->len++] = reg;
			bb->cs[bb->len++] = entry->set_bits;
			xe_gt_dbg(gt, "REG[0x%lx] = 0x%08x", reg,
				  entry->set_bits);
		}
	}

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

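/*
 * For each engine class, run a workaround batch followed by a context
 * switch and capture the resulting context image as the GT's default
 * ("golden") LRC for that class.
 */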
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_lrc_size(xe, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0].bo->vmap,
				   xe_lrc_pphwsp_offset(&q->lrc[0]),
				   xe_lrc_size(xe, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

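/*
 * Early GT init: read topology and MCR steering info under forcewake, then
 * build the GT-level register save/restore list from workarounds and
 * tunings.
 */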
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	xe_force_wake_init_gt(gt, gt_to_fw(gt));

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	return 0;
}

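/*
 * Initialization that only requires the GT forcewake domain: PAT tables,
 * GGTT, early uC/hwconfig setup, hw engine IRQs and early engine init.
 */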
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	xe_pat_init(gt);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
	}

	err = xe_uc_init(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_sysfs_init(&gt->gtidle);

	/* XXX: Fake that we pull the engine mask from hwconfig blob */
	gt->info.engine_mask = gt->info.__engine_mask;

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		drm_warn(&gt_to_xe(gt)->drm,
			 "failed to register engines sysfs directory, err: %d\n",
			 err);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

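/*
 * Initialization that needs all forcewake domains: MCR defaults, GT
 * register save/restore, clocks, MOCS, hw engines, uC load and the
 * migration engine.
 */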
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.supports_usm) {
			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

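/*
 * Full GT init: set up fence IRQs, ring ops, TLB invalidation and pagefault
 * handling, then bring up the hardware in two forcewake phases (GT domain
 * only, then all domains).
 */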
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	xe_gt_sysfs_init(gt);

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return err;

	return 0;
}

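/* Trigger a full GT domain reset via GDRST and wait for the hardware to clear it. */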
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	return err;
}

static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	return 0;
}

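/*
 * Notify userspace of a GT reset failure via a udev CHANGE event carrying
 * the tile and GT ids.
 */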
static void xe_uevent_gt_reset_failure(struct pci_dev *pdev, u8 tile_id, u8 gt_id)
{
	char *reset_event[4];

	reset_event[0] = XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
	reset_event[1] = kasprintf(GFP_KERNEL, "TILE_ID=%d", tile_id);
	reset_event[2] = kasprintf(GFP_KERNEL, "GT_ID=%d", gt_id);
	reset_event[3] = NULL;
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, reset_event);

	kfree(reset_event[1]);
	kfree(reset_event[2]);
}

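/*
 * Full GT reset: stop the uC and pagefault handling, reset the GT domain,
 * then restart the hardware. On failure, notify userspace via a uevent.
 */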
static int gt_reset(struct xe_gt *gt)
{
	int err;

	/* We only support GT resets with GuC submission */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	err = xe_uc_stop(&gt->uc);
	if (err)
		goto err_out;

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(err);

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_device_mem_access_put(gt_to_xe(gt));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	/* Notify userspace about gt reset failure */
	xe_uevent_gt_reset_failure(to_pci_dev(gt_to_xe(gt)->drm.dev),
				   gt_to_tile(gt)->id, gt->info.id);

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	xe_device_mem_access_get(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_info(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_info(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

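/* Look up a hw engine on this GT by class and physical or logical instance. */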
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

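/*
 * Return any engine in the same reset domain as the given class. Render and
 * compute share a reset domain, so either class satisfies a request for the
 * other.
 */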
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}