drm/xe: Cleanup style warnings
drivers/gpu/drm/xe/xe_gt.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_engine.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt_clock.h"
#include "xe_gt_idle_sysfs.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_engine_types.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pat.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

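/**
 * xe_gt_alloc - allocate and minimally set up a GT
 * @tile: &xe_tile the GT belongs to
 *
 * The GT is allocated with drmm_kzalloc() so it is freed together with the
 * DRM device; its ordered workqueue is created here as well.
 *
 * Return: pointer to the new GT, or ERR_PTR(-ENOMEM) on allocation failure.
 */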
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	return gt;
}

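/*
 * Clear the GuC submission enabled flag so the GuC is reloaded on the next
 * restart; see the FIXME below on why xe_uc_sanitize() is not called here.
 */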
void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL the driver will
	 * not reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

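/* drmm release action: destroy the ordered workqueue and the fence IRQs. */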
static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	destroy_workqueue(gt->ordered_wq);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

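/*
 * Submit an empty workaround batch on @e and wait up to a second for it to
 * complete; used by xe_gt_record_default_lrcs() to cycle contexts.
 */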
static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	u64 batch_ofs;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
	job = xe_bb_create_wa_job(e, bb, batch_ofs);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

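/*
 * Build a batch of MI_LOAD_REGISTER_IMM writes from the engine's LRC
 * save/restore list, submit it as a workaround job and wait for completion,
 * so the register values are captured in the recorded context image.
 */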
static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
{
	struct xe_reg_sr *sr = &e->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	u64 batch_ofs;
	long timeout;
	int count = 0;

	bb = xe_bb_new(gt, SZ_4K, false);	/* Just pick a large BB size */
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, reg, entry)
		++count;

	if (count) {
		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM(count);
		xa_for_each(&sr->xa, reg, entry) {
			bb->cs[bb->len++] = reg;
			bb->cs[bb->len++] = entry->set_bits;
		}
	}

	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
	job = xe_bb_create_wa_job(e, bb, batch_ofs);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

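/**
 * xe_gt_record_default_lrcs - record the default ("golden") LRC per class
 * @gt: the GT
 *
 * For every hardware engine class, prime a context with the LRC workarounds,
 * switch to a second context and back, then copy the resulting context image
 * into gt->default_lrc[class].
 *
 * Return: 0 on success, negative error code on failure.
 */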
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_engine *e, *nop_e;
		struct xe_vm *vm;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_lrc_size(xe, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		vm = xe_migrate_get_vm(tile->migrate);
		e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
				     hwe, ENGINE_FLAG_WA);
		if (IS_ERR(e)) {
			err = PTR_ERR(e);
			xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
				  hwe->name, e);
			goto put_vm;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, e);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), e->guc->id);
			goto put_engine;
		}

		nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
					 1, hwe, ENGINE_FLAG_WA);
		if (IS_ERR(nop_e)) {
			err = PTR_ERR(nop_e);
			xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
				  hwe->name, nop_e);
			goto put_engine;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_e);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_e->guc->id);
			goto put_nop_e;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, e);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), e->guc->id);
			goto put_nop_e;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &e->lrc[0].bo->vmap,
				   xe_lrc_pphwsp_offset(&e->lrc[0]),
				   xe_lrc_size(xe, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_e:
		xe_engine_put(nop_e);
put_engine:
		xe_engine_put(e);
put_vm:
		xe_vm_put(vm);
		if (err)
			break;
	}

	return err;
}

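/**
 * xe_gt_init_early - early GT initialization, before hwconfig is available
 * @gt: the GT
 *
 * Initializes topology and MCR under GT forcewake and sets up the GT
 * register save/restore, workaround and tuning lists.
 *
 * Return: 0 on success, negative error code on failure.
 */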
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	xe_force_wake_init_gt(gt, gt_to_fw(gt));

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	return 0;
}

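/* Initialization steps that only need the GT forcewake domain awake. */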
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	xe_pat_init(gt);

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
	}

	err = xe_uc_init(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_sysfs_init(&gt->gtidle);

	/* XXX: Fake that we pull the engine mask from hwconfig blob */
	gt->info.engine_mask = gt->info.__engine_mask;

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

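/* Initialization steps that need all forcewake domains awake. */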
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.supports_usm) {
			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_device_mem_access_put(gt_to_xe(gt));

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
	xe_device_mem_access_put(gt_to_xe(gt));

	return err;
}

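/**
 * xe_gt_init - main GT initialization
 * @gt: the GT
 *
 * Sets up fence IRQs, ring ops, TLB invalidation, pagefault handling, MOCS
 * and sysfs, runs the per-forcewake-domain init steps above and registers
 * gt_fini() as a drmm release action.
 *
 * Return: 0 on success, negative error code on failure.
 */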
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	xe_gt_sysfs_init(gt);

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return err;

	return 0;
}

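/* Trigger a full GT reset through GDRST and wait for GRDOM_FULL to clear. */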
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
			     NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	return err;
}

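/*
 * Re-apply GT state (PAT, MCR defaults, WOPCM, rings, uC, MOCS and per-engine
 * save/restore lists) after a reset or on resume.
 */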
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	return 0;
}

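/* Full GT reset: stop the uC, reset the hardware, then restart everything. */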
static int gt_reset(struct xe_gt *gt)
{
	int err;

	/* We only support GT resets with GuC submission */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	err = xe_uc_stop(&gt->uc);
	if (err)
		goto err_out;

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_device_mem_access_put(gt_to_xe(gt));
	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

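/**
 * xe_gt_reset_async - queue an asynchronous GT reset
 * @gt: the GT to reset
 *
 * Queues gt_reset() on the GT's ordered workqueue unless a reset is already
 * in flight.
 */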
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

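/**
 * xe_gt_suspend_prepare - prepare the GT's uC for an upcoming suspend
 * @gt: the GT
 */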
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	xe_device_mem_access_get(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

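/**
 * xe_gt_suspend - suspend a GT
 * @gt: the GT
 *
 * Only supported with GuC submission; sanitizes the GT and suspends its uC.
 *
 * Return: 0 on success, negative error code on failure.
 */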
int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_sanitize(gt);

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_info(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

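/**
 * xe_gt_resume - resume a GT
 * @gt: the GT
 *
 * Restores GT state via do_gt_restart() under full forcewake.
 *
 * Return: 0 on success, negative error code on failure.
 */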
int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_device_mem_access_get(gt_to_xe(gt));
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_device_mem_access_put(gt_to_xe(gt));
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_info(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_device_mem_access_put(gt_to_xe(gt));
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

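/**
 * xe_gt_hw_engine - look up a hardware engine on a GT
 * @gt: the GT
 * @class: engine class to match
 * @instance: engine instance to match
 * @logical: match @instance against the logical instance instead of the
 *           physical one
 *
 * Return: the matching &xe_hw_engine, or NULL if none is found.
 */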
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

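/**
 * xe_gt_any_hw_engine_by_reset_domain - find any engine sharing a reset
 * domain with the given class
 * @gt: the GT
 * @class: engine class of interest
 *
 * Render and compute engines are treated as one reset domain, so either
 * satisfies a request for the other; any other class must match exactly.
 *
 * Return: a matching &xe_hw_engine, or NULL if none is found.
 */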
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}