// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_engine.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt_clock.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_engine_types.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
	 * reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	destroy_workqueue(gt->ordered_wq);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

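/*
 * Submit a trivial (empty) batch on the given engine and wait up to one
 * second for it to complete. Used below to force a context switch while
 * recording the golden LRC state.
 */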
static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	u64 batch_ofs;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
	job = xe_bb_create_wa_job(e, bb, batch_ofs);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

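/*
 * Build a batch that writes every register value recorded in the engine's
 * LRC save/restore table via MI_LOAD_REGISTER_IMM, then submit it and wait
 * for completion so the workaround values land in the context image.
 */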
static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
{
	struct xe_reg_sr *sr = &e->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	u64 batch_ofs;
	long timeout;
	int count = 0;

	bb = xe_bb_new(gt, SZ_4K, false);	/* Just pick a large BB size */
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, reg, entry)
		++count;

	if (count) {
		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM(count);
		xa_for_each(&sr->xa, reg, entry) {
			bb->cs[bb->len++] = reg;
			bb->cs[bb->len++] = entry->set_bits;
		}
	}

	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
	job = xe_bb_create_wa_job(e, bb, batch_ofs);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

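/*
 * Record a "golden" default LRC image per engine class: run the workaround
 * batch on a throwaway engine, bounce through a second engine to force a
 * context save, then copy the resulting context image so it can seed new
 * contexts of that class.
 */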
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_engine *e, *nop_e;
		struct xe_vm *vm;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_lrc_size(xe, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		vm = xe_migrate_get_vm(tile->migrate);
		e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
				     hwe, ENGINE_FLAG_WA);
		if (IS_ERR(e)) {
			err = PTR_ERR(e);
			xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
				  hwe->name, e);
			goto put_vm;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, e);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), e->guc->id);
			goto put_engine;
		}

		nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
					 1, hwe, ENGINE_FLAG_WA);
		if (IS_ERR(nop_e)) {
			err = PTR_ERR(nop_e);
			xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
				  hwe->name, nop_e);
			goto put_engine;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_e);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_e->guc->id);
			goto put_nop_e;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, e);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), e->guc->id);
			goto put_nop_e;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &e->lrc[0].bo->vmap,
				   xe_lrc_pphwsp_offset(&e->lrc[0]),
				   xe_lrc_size(xe, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_e:
		xe_engine_put(nop_e);
put_engine:
		xe_engine_put(e);
put_vm:
		xe_vm_put(vm);
		if (err)
			break;
	}

	return err;
}

int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	xe_force_wake_init_gt(gt, gt_to_fw(gt));

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	return 0;
}

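/*
 * Initialization that only needs the GT forcewake domain held: PAT setup,
 * GGTT init, early uC (GuC/HuC) init and hwconfig, per-engine interrupt
 * enabling and the early hardware engine list.
 */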
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	xe_pat_init(gt);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
	}

	err = xe_uc_init(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	/* XXX: Fake that we pull the engine mask from hwconfig blob */
	gt->info.engine_mask = gt->info.__engine_mask;

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

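/*
 * Initialization that requires all forcewake domains held: steered register
 * defaults, save/restore registers, GT clock, MOCS, execlist and hardware
 * engine setup, the USM and migration infrastructure and the final uC load.
 */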
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so it does not block behind user
		 * operations
		 */
		if (gt_to_xe(gt)->info.supports_usm) {
			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

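/*
 * Main GT init entry point: set up reset handling, fences, TLB invalidation,
 * page fault handling, MOCS and sysfs, then run the two forcewake-domain
 * init phases above and register teardown as a DRM managed action.
 */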
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	xe_gt_sysfs_init(gt);

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return err;

	return 0;
}

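/* Trigger a full GT reset via GDRST and wait for the reset bit to clear. */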
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
			     NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	return err;
}

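/*
 * Re-apply the GT state that is lost across a reset or suspend: PAT, steered
 * register defaults, save/restore registers, WOPCM, ring enables, uC reload,
 * MOCS and per-engine whitelists.
 */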
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(&hwe->reg_whitelist,
					  hwe->mmio_base, gt);
	}

	return 0;
}

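/*
 * Full GT reset sequence: quiesce the uC and fault handling, reset the
 * hardware, then restart it. Only supported with GuC submission.
 */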
static int gt_reset(struct xe_gt *gt)
{
	int err;

	/* We only support GT resets with GuC submission */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);
	xe_gt_tlb_invalidation_reset(gt);

	err = xe_uc_stop(&gt->uc);
	if (err)
		goto err_out;

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_device_mem_access_put(gt_to_xe(gt));
	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	xe_device_mem_access_get(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_info(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_info(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

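/*
 * Look up a hardware engine on this GT by class and either its physical or
 * logical instance number.
 */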
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

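/*
 * Return any engine that shares a reset domain with the given class; render
 * and compute engines are treated as one domain, all other classes map to
 * themselves.
 */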
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}