drm/xe: Rename engine to exec_queue
author     Francois Dugast <francois.dugast@intel.com>
Mon, 31 Jul 2023 15:30:02 +0000 (17:30 +0200)
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:39:20 +0000 (11:39 -0500)
The term "engine" was inappropriately used to refer to execution
queues, which also created some confusion with hardware engines.
Where it applies, the exec_queue variable name is changed to q and
comments are updated as well.

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/162
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
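
For readers skimming the diff below, the following sketch condenses the naming convention the rename establishes for driver-internal callers. It is not part of the patch: the helper function and the included header are hypothetical, while the xe_exec_queue_* names are taken from the hunks that follow.

#include "xe_exec_queue.h"	/* assumed to declare the renamed xe_exec_queue_* helpers */

static void example_lookup_and_drop(struct xe_file *xef, u32 id)
{
	/* Before this patch: struct xe_engine *e = xe_engine_lookup(xef, id); */
	struct xe_exec_queue *q = xe_exec_queue_lookup(xef, id);

	if (!q)
		return;

	/*
	 * Other helpers follow the same s/engine/exec_queue/ pattern, e.g.
	 * xe_exec_queue_is_parallel(), xe_exec_queue_kill(), and
	 * xe_exec_queue_put() below (formerly xe_engine_put()).
	 */
	xe_exec_queue_put(q);
}
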
47 files changed:
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/xe_bb.c
drivers/gpu/drm/xe/xe_bb.h
drivers/gpu/drm/xe/xe_devcoredump.c
drivers/gpu/drm/xe/xe_devcoredump.h
drivers/gpu/drm/xe/xe_devcoredump_types.h
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_engine_types.h [deleted file]
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_exec_queue_types.h [new file with mode: 0644]
drivers/gpu/drm/xe/xe_execlist.c
drivers/gpu/drm/xe/xe_execlist_types.h
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt_types.h
drivers/gpu/drm/xe/xe_guc_ads.c
drivers/gpu/drm/xe/xe_guc_ct.c
drivers/gpu/drm/xe/xe_guc_engine_types.h [deleted file]
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h [new file with mode: 0644]
drivers/gpu/drm/xe/xe_guc_fwif.h
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_guc_submit.h
drivers/gpu/drm/xe/xe_guc_submit_types.h
drivers/gpu/drm/xe/xe_guc_types.h
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_lrc.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_migrate.h
drivers/gpu/drm/xe/xe_mocs.h
drivers/gpu/drm/xe/xe_preempt_fence.c
drivers/gpu/drm/xe/xe_preempt_fence.h
drivers/gpu/drm/xe/xe_preempt_fence_types.h
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_pt.h
drivers/gpu/drm/xe/xe_query.c
drivers/gpu/drm/xe/xe_ring_ops.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_sched_job.h
drivers/gpu/drm/xe/xe_sched_job_types.h
drivers/gpu/drm/xe/xe_trace.h
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_types.h
include/uapi/drm/xe_drm.h
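
The last entry in the file list above, include/uapi/drm/xe_drm.h, carries the user-facing half of the rename. A hypothetical userspace consumer of the renamed ioctl could look like the sketch below; the DRM_IOCTL_XE_EXEC_QUEUE_CREATE request name, the vm_id field, and the exact struct layout are assumptions inferred from the DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, ...) entry and the handler fields (exec_queue_id, width, num_placements, instances) used in the hunks below.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int create_exec_queue(int fd, uint32_t vm_id,
			     struct drm_xe_engine_class_instance *eci)
{
	struct drm_xe_exec_queue_create create;	/* was struct drm_xe_engine_create */

	memset(&create, 0, sizeof(create));
	create.vm_id = vm_id;			/* assumed field, unchanged by the rename */
	create.width = 1;
	create.num_placements = 1;
	create.instances = (uintptr_t)eci;

	if (drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return -1;

	return (int)create.exec_queue_id;	/* was engine_id */
}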

index 9e9b228fe31536cda69e55ea7493798908442be1..5c8d5e78d9bc4a582cd001ab49586b9a0d090ac5 100644 (file)
@@ -38,7 +38,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
                          struct kunit *test)
 {
        u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
-       struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
+       struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
                                                              batch_base,
                                                              second_idx);
        struct dma_fence *fence;
@@ -215,7 +215,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
        xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
 
        then = ktime_get();
-       fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
+       fence = xe_migrate_update_pgtables(m, NULL, NULL, m->q, &update, 1,
                                           NULL, 0, &pt_update);
        now = ktime_get();
        if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
@@ -257,7 +257,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
                return;
        }
 
-       big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
+       big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
                                   ttm_bo_type_kernel,
                                   XE_BO_CREATE_VRAM_IF_DGFX(tile) |
                                   XE_BO_CREATE_PINNED_BIT);
@@ -266,7 +266,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
                goto vunmap;
        }
 
-       pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
+       pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
                                  ttm_bo_type_kernel,
                                  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
                                  XE_BO_CREATE_PINNED_BIT);
@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
                goto free_big;
        }
 
-       tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
+       tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
                                    2 * SZ_4K,
                                    ttm_bo_type_kernel,
                                    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -295,14 +295,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
        }
 
        kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
-                  (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
+                  (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
                   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
 
        /* First part of the test, are we updating our pagetable bo with a new entry? */
        xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
                  0xdeaddeadbeefbeef);
        expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
-       if (m->eng->vm->flags & XE_VM_FLAG_64K)
+       if (m->q->vm->flags & XE_VM_FLAG_64K)
                expected |= XE_PTE_PS64;
        if (xe_bo_is_vram(pt))
                xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
@@ -399,11 +399,11 @@ static int migrate_test_run_device(struct xe_device *xe)
                struct ww_acquire_ctx ww;
 
                kunit_info(test, "Testing tile id %d.\n", id);
-               xe_vm_lock(m->eng->vm, &ww, 0, true);
+               xe_vm_lock(m->q->vm, &ww, 0, true);
                xe_device_mem_access_get(xe);
                xe_migrate_sanity_test(m, test);
                xe_device_mem_access_put(xe);
-               xe_vm_unlock(m->eng->vm, &ww);
+               xe_vm_unlock(m->q->vm, &ww);
        }
 
        return 0;
index b15a7cb7db4cb05daf05c6c941113009d36bf328..38f4ce83a207e0e97482f5fe5ed8f75823befcf8 100644 (file)
@@ -7,7 +7,7 @@
 
 #include "regs/xe_gpu_commands.h"
 #include "xe_device.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_hw_fence.h"
 #include "xe_sa.h"
@@ -60,30 +60,30 @@ err:
 }
 
 static struct xe_sched_job *
-__xe_bb_create_job(struct xe_engine *kernel_eng, struct xe_bb *bb, u64 *addr)
+__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
 {
        u32 size = drm_suballoc_size(bb->bo);
 
        bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 
-       WARN_ON(bb->len * 4 + bb_prefetch(kernel_eng->gt) > size);
+       WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size);
 
        xe_sa_bo_flush_write(bb->bo);
 
-       return xe_sched_job_create(kernel_eng, addr);
+       return xe_sched_job_create(q, addr);
 }
 
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
                                         struct xe_bb *bb, u64 batch_base_ofs)
 {
        u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
 
-       XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+       XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
 
-       return __xe_bb_create_job(wa_eng, bb, &addr);
+       return __xe_bb_create_job(q, bb, &addr);
 }
 
-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
                                                struct xe_bb *bb,
                                                u64 batch_base_ofs,
                                                u32 second_idx)
@@ -95,18 +95,18 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
        };
 
        XE_WARN_ON(second_idx > bb->len);
-       XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
+       XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
 
-       return __xe_bb_create_job(kernel_eng, bb, addr);
+       return __xe_bb_create_job(q, bb, addr);
 }
 
-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
                                      struct xe_bb *bb)
 {
        u64 addr = xe_sa_bo_gpu_addr(bb->bo);
 
-       XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
-       return __xe_bb_create_job(kernel_eng, bb, &addr);
+       XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION);
+       return __xe_bb_create_job(q, bb, &addr);
 }
 
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
index 0cc9260c9634748d08b25a8bcfa09bb8ab3b53d6..c5ae0770bab5c3cba6dbd1b507184a4067ef15e9 100644 (file)
 struct dma_fence;
 
 struct xe_gt;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_sched_job;
 
 struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
-struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
                                      struct xe_bb *bb);
-struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
                                                struct xe_bb *bb, u64 batch_ofs,
                                                u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
+struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
                                         struct xe_bb *bb, u64 batch_ofs);
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
 
index 61ff97ea765931931e296e0c234bc52b4e3b94a3..68abc0b195beb897af366cc171a01acc47ec836e 100644 (file)
@@ -53,9 +53,9 @@ static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
        return container_of(coredump, struct xe_device, devcoredump);
 }
 
-static struct xe_guc *engine_to_guc(struct xe_engine *e)
+static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
 {
-       return &e->gt->uc.guc;
+       return &q->gt->uc.guc;
 }
 
 static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
@@ -91,7 +91,7 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
 
        drm_printf(&p, "\n**** GuC CT ****\n");
        xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
-       xe_guc_engine_snapshot_print(coredump->snapshot.ge, &p);
+       xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
 
        drm_printf(&p, "\n**** HW Engines ****\n");
        for (i = 0; i < XE_NUM_HW_ENGINES; i++)
@@ -112,7 +112,7 @@ static void xe_devcoredump_free(void *data)
                return;
 
        xe_guc_ct_snapshot_free(coredump->snapshot.ct);
-       xe_guc_engine_snapshot_free(coredump->snapshot.ge);
+       xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
        for (i = 0; i < XE_NUM_HW_ENGINES; i++)
                if (coredump->snapshot.hwe[i])
                        xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
@@ -123,14 +123,14 @@ static void xe_devcoredump_free(void *data)
 }
 
 static void devcoredump_snapshot(struct xe_devcoredump *coredump,
-                                struct xe_engine *e)
+                                struct xe_exec_queue *q)
 {
        struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
-       u32 adj_logical_mask = e->logical_mask;
-       u32 width_mask = (0x1 << e->width) - 1;
+       u32 adj_logical_mask = q->logical_mask;
+       u32 width_mask = (0x1 << q->width) - 1;
        int i;
        bool cookie;
 
@@ -138,22 +138,22 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
        ss->boot_time = ktime_get_boottime();
 
        cookie = dma_fence_begin_signalling();
-       for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+       for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
                if (adj_logical_mask & BIT(i)) {
                        adj_logical_mask |= width_mask << i;
-                       i += e->width;
+                       i += q->width;
                } else {
                        ++i;
                }
        }
 
-       xe_force_wake_get(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+       xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 
        coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
-       coredump->snapshot.ge = xe_guc_engine_snapshot_capture(e);
+       coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
 
-       for_each_hw_engine(hwe, e->gt, id) {
-               if (hwe->class != e->hwe->class ||
+       for_each_hw_engine(hwe, q->gt, id) {
+               if (hwe->class != q->hwe->class ||
                    !(BIT(hwe->logical_instance) & adj_logical_mask)) {
                        coredump->snapshot.hwe[id] = NULL;
                        continue;
@@ -161,21 +161,21 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
                coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
        }
 
-       xe_force_wake_put(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
+       xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
        dma_fence_end_signalling(cookie);
 }
 
 /**
  * xe_devcoredump - Take the required snapshots and initialize coredump device.
- * @e: The faulty xe_engine, where the issue was detected.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
  *
  * This function should be called at the crash time within the serialized
  * gt_reset. It is skipped if we still have the core dump device available
  * with the information of the 'first' snapshot.
  */
-void xe_devcoredump(struct xe_engine *e)
+void xe_devcoredump(struct xe_exec_queue *q)
 {
-       struct xe_device *xe = gt_to_xe(e->gt);
+       struct xe_device *xe = gt_to_xe(q->gt);
        struct xe_devcoredump *coredump = &xe->devcoredump;
 
        if (coredump->captured) {
@@ -184,7 +184,7 @@ void xe_devcoredump(struct xe_engine *e)
        }
 
        coredump->captured = true;
-       devcoredump_snapshot(coredump, e);
+       devcoredump_snapshot(coredump, q);
 
        drm_info(&xe->drm, "Xe device coredump has been created\n");
        drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
index 8548821292275d1a0e86921ae0adf3d694b3035a..6ac218a5c1945886d1d5d8335c412160ad9f6f6e 100644 (file)
@@ -7,12 +7,12 @@
 #define _XE_DEVCOREDUMP_H_
 
 struct xe_device;
-struct xe_engine;
+struct xe_exec_queue;
 
 #ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_engine *e);
+void xe_devcoredump(struct xe_exec_queue *q);
 #else
-static inline void xe_devcoredump(struct xe_engine *e)
+static inline void xe_devcoredump(struct xe_exec_queue *q)
 {
 }
 #endif
index c0d711eb6ab31d8a915c98b179f167d6d1899b21..7fdad9c3d3dde62cb2aca573f7a8a404b52bd136 100644 (file)
@@ -30,7 +30,7 @@ struct xe_devcoredump_snapshot {
        /** @ct: GuC CT snapshot */
        struct xe_guc_ct_snapshot *ct;
        /** @ge: Guc Engine snapshot */
-       struct xe_guc_submit_engine_snapshot *ge;
+       struct xe_guc_submit_exec_queue_snapshot *ge;
        /** @hwe: HW Engine snapshot array */
        struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
 };
index a8ab86379ed67975805e2788555fdf9d722b4cfe..df1953759c670ffe465bc367c242fd3053da477b 100644 (file)
@@ -53,33 +53,33 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
        mutex_init(&xef->vm.lock);
        xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
 
-       mutex_init(&xef->engine.lock);
-       xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);
+       mutex_init(&xef->exec_queue.lock);
+       xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
 
        file->driver_priv = xef;
        return 0;
 }
 
-static void device_kill_persistent_engines(struct xe_device *xe,
-                                          struct xe_file *xef);
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+                                              struct xe_file *xef);
 
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = file->driver_priv;
        struct xe_vm *vm;
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        unsigned long idx;
 
-       mutex_lock(&xef->engine.lock);
-       xa_for_each(&xef->engine.xa, idx, e) {
-               xe_engine_kill(e);
-               xe_engine_put(e);
+       mutex_lock(&xef->exec_queue.lock);
+       xa_for_each(&xef->exec_queue.xa, idx, q) {
+               xe_exec_queue_kill(q);
+               xe_exec_queue_put(q);
        }
-       mutex_unlock(&xef->engine.lock);
-       xa_destroy(&xef->engine.xa);
-       mutex_destroy(&xef->engine.lock);
-       device_kill_persistent_engines(xe, xef);
+       mutex_unlock(&xef->exec_queue.lock);
+       xa_destroy(&xef->exec_queue.xa);
+       mutex_destroy(&xef->exec_queue.lock);
+       device_kill_persistent_exec_queues(xe, xef);
 
        mutex_lock(&xef->vm.lock);
        xa_for_each(&xef->vm.xa, idx, vm)
@@ -99,15 +99,15 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
        DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
+       DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
                          DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
+       DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
                          DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
+       DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
+       DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
                          DRM_RENDER_ALLOW),
@@ -324,33 +324,33 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }
 
-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
 {
        mutex_lock(&xe->persistent_engines.lock);
-       list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
+       list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
        mutex_unlock(&xe->persistent_engines.lock);
 }
 
-void xe_device_remove_persistent_engines(struct xe_device *xe,
-                                        struct xe_engine *e)
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+                                            struct xe_exec_queue *q)
 {
        mutex_lock(&xe->persistent_engines.lock);
-       if (!list_empty(&e->persistent.link))
-               list_del(&e->persistent.link);
+       if (!list_empty(&q->persistent.link))
+               list_del(&q->persistent.link);
        mutex_unlock(&xe->persistent_engines.lock);
 }
 
-static void device_kill_persistent_engines(struct xe_device *xe,
-                                          struct xe_file *xef)
+static void device_kill_persistent_exec_queues(struct xe_device *xe,
+                                              struct xe_file *xef)
 {
-       struct xe_engine *e, *next;
+       struct xe_exec_queue *q, *next;
 
        mutex_lock(&xe->persistent_engines.lock);
-       list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
+       list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
                                 persistent.link)
-               if (e->persistent.xef == xef) {
-                       xe_engine_kill(e);
-                       list_del_init(&e->persistent.link);
+               if (q->persistent.xef == xef) {
+                       xe_exec_queue_kill(q);
+                       list_del_init(&q->persistent.link);
                }
        mutex_unlock(&xe->persistent_engines.lock);
 }
index 61a5cf1f73006489e12c37f923ba31808cc0136a..71582094834c659d5175292f3a2af3b3357fe2ab 100644 (file)
@@ -6,7 +6,7 @@
 #ifndef _XE_DEVICE_H_
 #define _XE_DEVICE_H_
 
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_file;
 
 #include <drm/drm_util.h>
@@ -41,9 +41,9 @@ int xe_device_probe(struct xe_device *xe);
 void xe_device_remove(struct xe_device *xe);
 void xe_device_shutdown(struct xe_device *xe);
 
-void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e);
-void xe_device_remove_persistent_engines(struct xe_device *xe,
-                                        struct xe_engine *e);
+void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
+void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
+                                            struct xe_exec_queue *q);
 
 void xe_device_wmb(struct xe_device *xe);
 
index c521ffaf387196d1c71e3f6647fc1f3280a31d29..128e0a95369299eab1ef7715e2ee5b349b7c37db 100644 (file)
@@ -377,13 +377,13 @@ struct xe_file {
                struct mutex lock;
        } vm;
 
-       /** @engine: Submission engine state for file */
+       /** @exec_queue: Submission exec queue state for file */
        struct {
                /** @xe: xarray to store engines */
                struct xarray xa;
                /** @lock: protects file engine state */
                struct mutex lock;
-       } engine;
+       } exec_queue;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_engine_types.h b/drivers/gpu/drm/xe/xe_engine_types.h
deleted file mode 100644 (file)
index f1d5317..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_ENGINE_TYPES_H_
-#define _XE_ENGINE_TYPES_H_
-
-#include <linux/kref.h>
-
-#include <drm/gpu_scheduler.h>
-
-#include "xe_gpu_scheduler_types.h"
-#include "xe_hw_engine_types.h"
-#include "xe_hw_fence_types.h"
-#include "xe_lrc_types.h"
-
-struct xe_execlist_engine;
-struct xe_gt;
-struct xe_guc_engine;
-struct xe_hw_engine;
-struct xe_vm;
-
-enum xe_engine_priority {
-       XE_ENGINE_PRIORITY_UNSET = -2, /* For execlist usage only */
-       XE_ENGINE_PRIORITY_LOW = 0,
-       XE_ENGINE_PRIORITY_NORMAL,
-       XE_ENGINE_PRIORITY_HIGH,
-       XE_ENGINE_PRIORITY_KERNEL,
-
-       XE_ENGINE_PRIORITY_COUNT
-};
-
-/**
- * struct xe_engine - Submission engine
- *
- * Contains all state necessary for submissions. Can either be a user object or
- * a kernel object.
- */
-struct xe_engine {
-       /** @gt: graphics tile this engine can submit to */
-       struct xe_gt *gt;
-       /**
-        * @hwe: A hardware of the same class. May (physical engine) or may not
-        * (virtual engine) be where jobs actual engine up running. Should never
-        * really be used for submissions.
-        */
-       struct xe_hw_engine *hwe;
-       /** @refcount: ref count of this engine */
-       struct kref refcount;
-       /** @vm: VM (address space) for this engine */
-       struct xe_vm *vm;
-       /** @class: class of this engine */
-       enum xe_engine_class class;
-       /** @priority: priority of this exec queue */
-       enum xe_engine_priority priority;
-       /**
-        * @logical_mask: logical mask of where job submitted to engine can run
-        */
-       u32 logical_mask;
-       /** @name: name of this engine */
-       char name[MAX_FENCE_NAME_LEN];
-       /** @width: width (number BB submitted per exec) of this engine */
-       u16 width;
-       /** @fence_irq: fence IRQ used to signal job completion */
-       struct xe_hw_fence_irq *fence_irq;
-
-#define ENGINE_FLAG_BANNED             BIT(0)
-#define ENGINE_FLAG_KERNEL             BIT(1)
-#define ENGINE_FLAG_PERSISTENT         BIT(2)
-#define ENGINE_FLAG_COMPUTE_MODE       BIT(3)
-/* Caller needs to hold rpm ref when creating engine with ENGINE_FLAG_VM */
-#define ENGINE_FLAG_VM                 BIT(4)
-#define ENGINE_FLAG_BIND_ENGINE_CHILD  BIT(5)
-#define ENGINE_FLAG_WA                 BIT(6)
-
-       /**
-        * @flags: flags for this engine, should statically setup aside from ban
-        * bit
-        */
-       unsigned long flags;
-
-       union {
-               /** @multi_gt_list: list head for VM bind engines if multi-GT */
-               struct list_head multi_gt_list;
-               /** @multi_gt_link: link for VM bind engines if multi-GT */
-               struct list_head multi_gt_link;
-       };
-
-       union {
-               /** @execlist: execlist backend specific state for engine */
-               struct xe_execlist_engine *execlist;
-               /** @guc: GuC backend specific state for engine */
-               struct xe_guc_engine *guc;
-       };
-
-       /**
-        * @persistent: persistent engine state
-        */
-       struct {
-               /** @xef: file which this engine belongs to */
-               struct xe_file *xef;
-               /** @link: link in list of persistent engines */
-               struct list_head link;
-       } persistent;
-
-       union {
-               /**
-                * @parallel: parallel submission state
-                */
-               struct {
-                       /** @composite_fence_ctx: context composite fence */
-                       u64 composite_fence_ctx;
-                       /** @composite_fence_seqno: seqno for composite fence */
-                       u32 composite_fence_seqno;
-               } parallel;
-               /**
-                * @bind: bind submission state
-                */
-               struct {
-                       /** @fence_ctx: context bind fence */
-                       u64 fence_ctx;
-                       /** @fence_seqno: seqno for bind fence */
-                       u32 fence_seqno;
-               } bind;
-       };
-
-       /** @sched_props: scheduling properties */
-       struct {
-               /** @timeslice_us: timeslice period in micro-seconds */
-               u32 timeslice_us;
-               /** @preempt_timeout_us: preemption timeout in micro-seconds */
-               u32 preempt_timeout_us;
-       } sched_props;
-
-       /** @compute: compute engine state */
-       struct {
-               /** @pfence: preemption fence */
-               struct dma_fence *pfence;
-               /** @context: preemption fence context */
-               u64 context;
-               /** @seqno: preemption fence seqno */
-               u32 seqno;
-               /** @link: link into VM's list of engines */
-               struct list_head link;
-               /** @lock: preemption fences lock */
-               spinlock_t lock;
-       } compute;
-
-       /** @usm: unified shared memory state */
-       struct {
-               /** @acc_trigger: access counter trigger */
-               u32 acc_trigger;
-               /** @acc_notify: access counter notify */
-               u32 acc_notify;
-               /** @acc_granularity: access counter granularity */
-               u32 acc_granularity;
-       } usm;
-
-       /** @ops: submission backend engine operations */
-       const struct xe_engine_ops *ops;
-
-       /** @ring_ops: ring operations for this engine */
-       const struct xe_ring_ops *ring_ops;
-       /** @entity: DRM sched entity for this engine (1 to 1 relationship) */
-       struct drm_sched_entity *entity;
-       /** @lrc: logical ring context for this engine */
-       struct xe_lrc lrc[];
-};
-
-/**
- * struct xe_engine_ops - Submission backend engine operations
- */
-struct xe_engine_ops {
-       /** @init: Initialize engine for submission backend */
-       int (*init)(struct xe_engine *e);
-       /** @kill: Kill inflight submissions for backend */
-       void (*kill)(struct xe_engine *e);
-       /** @fini: Fini engine for submission backend */
-       void (*fini)(struct xe_engine *e);
-       /** @set_priority: Set priority for engine */
-       int (*set_priority)(struct xe_engine *e,
-                           enum xe_engine_priority priority);
-       /** @set_timeslice: Set timeslice for engine */
-       int (*set_timeslice)(struct xe_engine *e, u32 timeslice_us);
-       /** @set_preempt_timeout: Set preemption timeout for engine */
-       int (*set_preempt_timeout)(struct xe_engine *e, u32 preempt_timeout_us);
-       /** @set_job_timeout: Set job timeout for engine */
-       int (*set_job_timeout)(struct xe_engine *e, u32 job_timeout_ms);
-       /**
-        * @suspend: Suspend engine from executing, allowed to be called
-        * multiple times in a row before resume with the caveat that
-        * suspend_wait returns before calling suspend again.
-        */
-       int (*suspend)(struct xe_engine *e);
-       /**
-        * @suspend_wait: Wait for an engine to suspend executing, should be
-        * call after suspend.
-        */
-       void (*suspend_wait)(struct xe_engine *e);
-       /**
-        * @resume: Resume engine execution, engine must be in a suspended
-        * state and dma fence returned from most recent suspend call must be
-        * signalled when this function is called.
-        */
-       void (*resume)(struct xe_engine *e);
-};
-
-#endif
index a043c649249b5ec30667ba33b18898744993fea4..629d81a789e77606a5b55daaee31289d84a44493 100644 (file)
 
 #define XE_EXEC_BIND_RETRY_TIMEOUT_MS 1000
 
-static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
+static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
                         struct ttm_validate_buffer tv_onstack[],
                         struct ttm_validate_buffer **tv,
                         struct list_head *objs)
 {
-       struct xe_vm *vm = e->vm;
+       struct xe_vm *vm = q->vm;
        struct xe_vma *vma;
        LIST_HEAD(dups);
        ktime_t end = 0;
        int err = 0;
 
        *tv = NULL;
-       if (xe_vm_no_dma_fences(e->vm))
+       if (xe_vm_no_dma_fences(q->vm))
                return 0;
 
 retry:
@@ -153,14 +153,14 @@ retry:
        return err;
 }
 
-static void xe_exec_end(struct xe_engine *e,
+static void xe_exec_end(struct xe_exec_queue *q,
                        struct ttm_validate_buffer *tv_onstack,
                        struct ttm_validate_buffer *tv,
                        struct ww_acquire_ctx *ww,
                        struct list_head *objs)
 {
-       if (!xe_vm_no_dma_fences(e->vm))
-               xe_vm_unlock_dma_resv(e->vm, tv_onstack, tv, ww, objs);
+       if (!xe_vm_no_dma_fences(q->vm))
+               xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
 }
 
 int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -170,7 +170,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_xe_exec *args = data;
        struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
        u64 __user *addresses_user = u64_to_user_ptr(args->address);
-       struct xe_engine *engine;
+       struct xe_exec_queue *q;
        struct xe_sync_entry *syncs = NULL;
        u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
        struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
@@ -189,30 +189,30 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;
 
-       engine = xe_engine_lookup(xef, args->engine_id);
-       if (XE_IOCTL_DBG(xe, !engine))
+       q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+       if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;
 
-       if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
+       if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
+       if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
+       if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
                err = -ECANCELED;
-               goto err_engine;
+               goto err_exec_queue;
        }
 
        if (args->num_syncs) {
                syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
                if (!syncs) {
                        err = -ENOMEM;
-                       goto err_engine;
+                       goto err_exec_queue;
                }
        }
 
-       vm = engine->vm;
+       vm = q->vm;
 
        for (i = 0; i < args->num_syncs; i++) {
                err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
@@ -222,9 +222,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        goto err_syncs;
        }
 
-       if (xe_engine_is_parallel(engine)) {
+       if (xe_exec_queue_is_parallel(q)) {
                err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
-                                      engine->width);
+                                      q->width);
                if (err) {
                        err = -EFAULT;
                        goto err_syncs;
@@ -294,26 +294,26 @@ retry:
                        goto err_unlock_list;
        }
 
-       err = xe_exec_begin(engine, &ww, tv_onstack, &tv, &objs);
+       err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
        if (err)
                goto err_unlock_list;
 
-       if (xe_vm_is_closed_or_banned(engine->vm)) {
+       if (xe_vm_is_closed_or_banned(q->vm)) {
                drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
                err = -ECANCELED;
-               goto err_engine_end;
+               goto err_exec_queue_end;
        }
 
-       if (xe_engine_is_lr(engine) && xe_engine_ring_full(engine)) {
+       if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
                err = -EWOULDBLOCK;
-               goto err_engine_end;
+               goto err_exec_queue_end;
        }
 
-       job = xe_sched_job_create(engine, xe_engine_is_parallel(engine) ?
+       job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
                                  addresses : &args->address);
        if (IS_ERR(job)) {
                err = PTR_ERR(job);
-               goto err_engine_end;
+               goto err_exec_queue_end;
        }
 
        /*
@@ -395,8 +395,8 @@ retry:
                xe_sync_entry_signal(&syncs[i], job,
                                     &job->drm.s_fence->finished);
 
-       if (xe_engine_is_lr(engine))
-               engine->ring_ops->emit_job(job);
+       if (xe_exec_queue_is_lr(q))
+               q->ring_ops->emit_job(job);
        xe_sched_job_push(job);
        xe_vm_reactivate_rebind(vm);
 
@@ -412,8 +412,8 @@ err_repin:
 err_put_job:
        if (err)
                xe_sched_job_put(job);
-err_engine_end:
-       xe_exec_end(engine, tv_onstack, tv, &ww, &objs);
+err_exec_queue_end:
+       xe_exec_end(q, tv_onstack, tv, &ww, &objs);
 err_unlock_list:
        if (write_locked)
                up_write(&vm->lock);
@@ -425,8 +425,8 @@ err_syncs:
        for (i = 0; i < num_syncs; i++)
                xe_sync_entry_cleanup(&syncs[i]);
        kfree(syncs);
-err_engine:
-       xe_engine_put(engine);
+err_exec_queue:
+       xe_exec_queue_put(q);
 
        return err;
 }
index f1cfc4b604d488127802b4e521050e09ca769415..1371829b9e35645c54ef0711e35810d8a4037049 100644 (file)
 #include "xe_trace.h"
 #include "xe_vm.h"
 
-static struct xe_engine *__xe_engine_create(struct xe_device *xe,
-                                           struct xe_vm *vm,
-                                           u32 logical_mask,
-                                           u16 width, struct xe_hw_engine *hwe,
-                                           u32 flags)
+static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
+                                                   struct xe_vm *vm,
+                                                   u32 logical_mask,
+                                                   u16 width, struct xe_hw_engine *hwe,
+                                                   u32 flags)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        struct xe_gt *gt = hwe->gt;
        int err;
        int i;
 
-       e = kzalloc(sizeof(*e) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
-       if (!e)
+       q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
+       if (!q)
                return ERR_PTR(-ENOMEM);
 
-       kref_init(&e->refcount);
-       e->flags = flags;
-       e->hwe = hwe;
-       e->gt = gt;
+       kref_init(&q->refcount);
+       q->flags = flags;
+       q->hwe = hwe;
+       q->gt = gt;
        if (vm)
-               e->vm = xe_vm_get(vm);
-       e->class = hwe->class;
-       e->width = width;
-       e->logical_mask = logical_mask;
-       e->fence_irq = &gt->fence_irq[hwe->class];
-       e->ring_ops = gt->ring_ops[hwe->class];
-       e->ops = gt->engine_ops;
-       INIT_LIST_HEAD(&e->persistent.link);
-       INIT_LIST_HEAD(&e->compute.link);
-       INIT_LIST_HEAD(&e->multi_gt_link);
+               q->vm = xe_vm_get(vm);
+       q->class = hwe->class;
+       q->width = width;
+       q->logical_mask = logical_mask;
+       q->fence_irq = &gt->fence_irq[hwe->class];
+       q->ring_ops = gt->ring_ops[hwe->class];
+       q->ops = gt->exec_queue_ops;
+       INIT_LIST_HEAD(&q->persistent.link);
+       INIT_LIST_HEAD(&q->compute.link);
+       INIT_LIST_HEAD(&q->multi_gt_link);
 
        /* FIXME: Wire up to configurable default value */
-       e->sched_props.timeslice_us = 1 * 1000;
-       e->sched_props.preempt_timeout_us = 640 * 1000;
+       q->sched_props.timeslice_us = 1 * 1000;
+       q->sched_props.preempt_timeout_us = 640 * 1000;
 
-       if (xe_engine_is_parallel(e)) {
-               e->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
-               e->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
+       if (xe_exec_queue_is_parallel(q)) {
+               q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
+               q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }
-       if (e->flags & ENGINE_FLAG_VM) {
-               e->bind.fence_ctx = dma_fence_context_alloc(1);
-               e->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
+       if (q->flags & EXEC_QUEUE_FLAG_VM) {
+               q->bind.fence_ctx = dma_fence_context_alloc(1);
+               q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }
 
        for (i = 0; i < width; ++i) {
-               err = xe_lrc_init(e->lrc + i, hwe, e, vm, SZ_16K);
+               err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
                if (err)
                        goto err_lrc;
        }
 
-       err = e->ops->init(e);
+       err = q->ops->init(q);
        if (err)
                goto err_lrc;
 
@@ -84,24 +84,24 @@ static struct xe_engine *__xe_engine_create(struct xe_device *xe,
         * can perform GuC CT actions when needed. Caller is expected to
         * have already grabbed the rpm ref outside any sensitive locks.
         */
-       if (e->flags & ENGINE_FLAG_VM)
+       if (q->flags & EXEC_QUEUE_FLAG_VM)
                drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
 
-       return e;
+       return q;
 
 err_lrc:
        for (i = i - 1; i >= 0; --i)
-               xe_lrc_finish(e->lrc + i);
-       kfree(e);
+               xe_lrc_finish(q->lrc + i);
+       kfree(q);
        return ERR_PTR(err);
 }
 
-struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
-                                  u32 logical_mask, u16 width,
-                                  struct xe_hw_engine *hwe, u32 flags)
+struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
+                                          u32 logical_mask, u16 width,
+                                          struct xe_hw_engine *hwe, u32 flags)
 {
        struct ww_acquire_ctx ww;
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        int err;
 
        if (vm) {
@@ -109,16 +109,16 @@ struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
                if (err)
                        return ERR_PTR(err);
        }
-       e = __xe_engine_create(xe, vm, logical_mask, width, hwe, flags);
+       q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
        if (vm)
                xe_vm_unlock(vm, &ww);
 
-       return e;
+       return q;
 }
 
-struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
-                                        struct xe_vm *vm,
-                                        enum xe_engine_class class, u32 flags)
+struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
+                                                struct xe_vm *vm,
+                                                enum xe_engine_class class, u32 flags)
 {
        struct xe_hw_engine *hwe, *hwe0 = NULL;
        enum xe_hw_engine_id id;
@@ -138,102 +138,102 @@ struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
        if (!logical_mask)
                return ERR_PTR(-ENODEV);
 
-       return xe_engine_create(xe, vm, logical_mask, 1, hwe0, flags);
+       return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
 }
 
-void xe_engine_destroy(struct kref *ref)
+void xe_exec_queue_destroy(struct kref *ref)
 {
-       struct xe_engine *e = container_of(ref, struct xe_engine, refcount);
-       struct xe_engine *engine, *next;
+       struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
+       struct xe_exec_queue *eq, *next;
 
-       if (!(e->flags & ENGINE_FLAG_BIND_ENGINE_CHILD)) {
-               list_for_each_entry_safe(engine, next, &e->multi_gt_list,
+       if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
+               list_for_each_entry_safe(eq, next, &q->multi_gt_list,
                                         multi_gt_link)
-                       xe_engine_put(engine);
+                       xe_exec_queue_put(eq);
        }
 
-       e->ops->fini(e);
+       q->ops->fini(q);
 }
 
-void xe_engine_fini(struct xe_engine *e)
+void xe_exec_queue_fini(struct xe_exec_queue *q)
 {
        int i;
 
-       for (i = 0; i < e->width; ++i)
-               xe_lrc_finish(e->lrc + i);
-       if (e->vm)
-               xe_vm_put(e->vm);
-       if (e->flags & ENGINE_FLAG_VM)
-               xe_device_mem_access_put(gt_to_xe(e->gt));
+       for (i = 0; i < q->width; ++i)
+               xe_lrc_finish(q->lrc + i);
+       if (q->vm)
+               xe_vm_put(q->vm);
+       if (q->flags & EXEC_QUEUE_FLAG_VM)
+               xe_device_mem_access_put(gt_to_xe(q->gt));
 
-       kfree(e);
+       kfree(q);
 }
 
-struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id)
+struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
-       mutex_lock(&xef->engine.lock);
-       e = xa_load(&xef->engine.xa, id);
-       if (e)
-               xe_engine_get(e);
-       mutex_unlock(&xef->engine.lock);
+       mutex_lock(&xef->exec_queue.lock);
+       q = xa_load(&xef->exec_queue.xa, id);
+       if (q)
+               xe_exec_queue_get(q);
+       mutex_unlock(&xef->exec_queue.lock);
 
-       return e;
+       return q;
 }
 
-enum xe_engine_priority
-xe_engine_device_get_max_priority(struct xe_device *xe)
+enum xe_exec_queue_priority
+xe_exec_queue_device_get_max_priority(struct xe_device *xe)
 {
-       return capable(CAP_SYS_NICE) ? XE_ENGINE_PRIORITY_HIGH :
-                                      XE_ENGINE_PRIORITY_NORMAL;
+       return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
+                                      XE_EXEC_QUEUE_PRIORITY_NORMAL;
 }
 
-static int engine_set_priority(struct xe_device *xe, struct xe_engine *e,
-                              u64 value, bool create)
+static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
+                                  u64 value, bool create)
 {
-       if (XE_IOCTL_DBG(xe, value > XE_ENGINE_PRIORITY_HIGH))
+       if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, value > xe_engine_device_get_max_priority(xe)))
+       if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
                return -EPERM;
 
-       return e->ops->set_priority(e, value);
+       return q->ops->set_priority(q, value);
 }
 
-static int engine_set_timeslice(struct xe_device *xe, struct xe_engine *e,
-                               u64 value, bool create)
+static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
+                                   u64 value, bool create)
 {
        if (!capable(CAP_SYS_NICE))
                return -EPERM;
 
-       return e->ops->set_timeslice(e, value);
+       return q->ops->set_timeslice(q, value);
 }
 
-static int engine_set_preemption_timeout(struct xe_device *xe,
-                                        struct xe_engine *e, u64 value,
-                                        bool create)
+static int exec_queue_set_preemption_timeout(struct xe_device *xe,
+                                            struct xe_exec_queue *q, u64 value,
+                                            bool create)
 {
        if (!capable(CAP_SYS_NICE))
                return -EPERM;
 
-       return e->ops->set_preempt_timeout(e, value);
+       return q->ops->set_preempt_timeout(q, value);
 }
 
-static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
-                                  u64 value, bool create)
+static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
+                                      u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
+       if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_VM))
+       if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
                return -EINVAL;
 
        if (value) {
-               struct xe_vm *vm = e->vm;
+               struct xe_vm *vm = q->vm;
                int err;
 
                if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
@@ -242,42 +242,42 @@ static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
                if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
                        return -EOPNOTSUPP;
 
-               if (XE_IOCTL_DBG(xe, e->width != 1))
+               if (XE_IOCTL_DBG(xe, q->width != 1))
                        return -EINVAL;
 
-               e->compute.context = dma_fence_context_alloc(1);
-               spin_lock_init(&e->compute.lock);
+               q->compute.context = dma_fence_context_alloc(1);
+               spin_lock_init(&q->compute.lock);
 
-               err = xe_vm_add_compute_engine(vm, e);
+               err = xe_vm_add_compute_exec_queue(vm, q);
                if (XE_IOCTL_DBG(xe, err))
                        return err;
 
-               e->flags |= ENGINE_FLAG_COMPUTE_MODE;
-               e->flags &= ~ENGINE_FLAG_PERSISTENT;
+               q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
+               q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
        }
 
        return 0;
 }
 
-static int engine_set_persistence(struct xe_device *xe, struct xe_engine *e,
-                                 u64 value, bool create)
+static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
+                                     u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
+       if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
                return -EINVAL;
 
        if (value)
-               e->flags |= ENGINE_FLAG_PERSISTENT;
+               q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
        else
-               e->flags &= ~ENGINE_FLAG_PERSISTENT;
+               q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
 
        return 0;
 }
 
-static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
-                                 u64 value, bool create)
+static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
+                                     u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
@@ -285,11 +285,11 @@ static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
        if (!capable(CAP_SYS_NICE))
                return -EPERM;
 
-       return e->ops->set_job_timeout(e, value);
+       return q->ops->set_job_timeout(q, value);
 }
 
-static int engine_set_acc_trigger(struct xe_device *xe, struct xe_engine *e,
-                                 u64 value, bool create)
+static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
+                                     u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
@@ -297,13 +297,13 @@ static int engine_set_acc_trigger(struct xe_device *xe, struct xe_engine *e,
        if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
                return -EINVAL;
 
-       e->usm.acc_trigger = value;
+       q->usm.acc_trigger = value;
 
        return 0;
 }
 
-static int engine_set_acc_notify(struct xe_device *xe, struct xe_engine *e,
-                                u64 value, bool create)
+static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
+                                    u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
@@ -311,13 +311,13 @@ static int engine_set_acc_notify(struct xe_device *xe, struct xe_engine *e,
        if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
                return -EINVAL;
 
-       e->usm.acc_notify = value;
+       q->usm.acc_notify = value;
 
        return 0;
 }
 
-static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
-                                     u64 value, bool create)
+static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
+                                         u64 value, bool create)
 {
        if (XE_IOCTL_DBG(xe, !create))
                return -EINVAL;
@@ -325,34 +325,34 @@ static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
        if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
                return -EINVAL;
 
-       e->usm.acc_granularity = value;
+       q->usm.acc_granularity = value;
 
        return 0;
 }
 
-typedef int (*xe_engine_set_property_fn)(struct xe_device *xe,
-                                        struct xe_engine *e,
-                                        u64 value, bool create);
-
-static const xe_engine_set_property_fn engine_set_property_funcs[] = {
-       [XE_ENGINE_SET_PROPERTY_PRIORITY] = engine_set_priority,
-       [XE_ENGINE_SET_PROPERTY_TIMESLICE] = engine_set_timeslice,
-       [XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT] = engine_set_preemption_timeout,
-       [XE_ENGINE_SET_PROPERTY_COMPUTE_MODE] = engine_set_compute_mode,
-       [XE_ENGINE_SET_PROPERTY_PERSISTENCE] = engine_set_persistence,
-       [XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT] = engine_set_job_timeout,
-       [XE_ENGINE_SET_PROPERTY_ACC_TRIGGER] = engine_set_acc_trigger,
-       [XE_ENGINE_SET_PROPERTY_ACC_NOTIFY] = engine_set_acc_notify,
-       [XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY] = engine_set_acc_granularity,
+typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
+                                            struct xe_exec_queue *q,
+                                            u64 value, bool create);
+
+static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
+       [XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
+       [XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+       [XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
+       [XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE] = exec_queue_set_compute_mode,
+       [XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
+       [XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
+       [XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
+       [XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
+       [XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
 };
 
-static int engine_user_ext_set_property(struct xe_device *xe,
-                                       struct xe_engine *e,
-                                       u64 extension,
-                                       bool create)
+static int exec_queue_user_ext_set_property(struct xe_device *xe,
+                                           struct xe_exec_queue *q,
+                                           u64 extension,
+                                           bool create)
 {
        u64 __user *address = u64_to_user_ptr(extension);
-       struct drm_xe_ext_engine_set_property ext;
+       struct drm_xe_ext_exec_queue_set_property ext;
        int err;
        u32 idx;
 
@@ -361,26 +361,26 @@ static int engine_user_ext_set_property(struct xe_device *xe,
                return -EFAULT;
 
        if (XE_IOCTL_DBG(xe, ext.property >=
-                        ARRAY_SIZE(engine_set_property_funcs)) ||
+                        ARRAY_SIZE(exec_queue_set_property_funcs)) ||
            XE_IOCTL_DBG(xe, ext.pad))
                return -EINVAL;
 
-       idx = array_index_nospec(ext.property, ARRAY_SIZE(engine_set_property_funcs));
-       return engine_set_property_funcs[idx](xe, e, ext.value,  create);
+       idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+       return exec_queue_set_property_funcs[idx](xe, q, ext.value,  create);
 }
 
-typedef int (*xe_engine_user_extension_fn)(struct xe_device *xe,
-                                          struct xe_engine *e,
-                                          u64 extension,
-                                          bool create);
+typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
+                                              struct xe_exec_queue *q,
+                                              u64 extension,
+                                              bool create);
 
-static const xe_engine_set_property_fn engine_user_extension_funcs[] = {
-       [XE_ENGINE_EXTENSION_SET_PROPERTY] = engine_user_ext_set_property,
+static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
+       [XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
 };
 
 #define MAX_USER_EXTENSIONS    16
-static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
-                                 u64 extensions, int ext_number, bool create)
+static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
+                                     u64 extensions, int ext_number, bool create)
 {
        u64 __user *address = u64_to_user_ptr(extensions);
        struct xe_user_extension ext;
@@ -396,17 +396,17 @@ static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
 
        if (XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.name >=
-                        ARRAY_SIZE(engine_user_extension_funcs)))
+                        ARRAY_SIZE(exec_queue_user_extension_funcs)))
                return -EINVAL;
 
        idx = array_index_nospec(ext.name,
-                                ARRAY_SIZE(engine_user_extension_funcs));
-       err = engine_user_extension_funcs[idx](xe, e, extensions, create);
+                                ARRAY_SIZE(exec_queue_user_extension_funcs));
+       err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
        if (XE_IOCTL_DBG(xe, err))
                return err;
 
        if (ext.next_extension)
-               return engine_user_extensions(xe, e, ext.next_extension,
+               return exec_queue_user_extensions(xe, q, ext.next_extension,
                                              ++ext_number, create);
 
        return 0;
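
The extension walk above follows xe_user_extension.next_extension link by link (one recursive call per entry), dispatches on ext.name, and caps the chain at MAX_USER_EXTENSIONS. Below is a minimal userspace sketch of building a two-entry chain of set-property extensions; the property values are purely illustrative, and the name of the embedded xe_user_extension member ("base" here) is an assumption rather than something visible in this hunk.

#include <stdint.h>
#include <string.h>
#include <drm/xe_drm.h>

/* Illustrative only: chain two set-property extensions for one exec queue. */
static __u64 build_prop_chain(struct drm_xe_ext_exec_queue_set_property ext[2])
{
	memset(ext, 0, 2 * sizeof(ext[0]));

	/* First link: priority (the value 2 is illustrative, not from this patch). */
	ext[0].base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY;	/* "base" member name assumed */
	ext[0].base.next_extension = (uintptr_t)&ext[1];
	ext[0].property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY;
	ext[0].value = 2;

	/* Second link: 5 ms timeslice; next_extension left at 0 ends the chain. */
	ext[1].base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY;
	ext[1].property = XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE;
	ext[1].value = 5000;	/* sched_props.timeslice_us is in microseconds */

	return (uintptr_t)&ext[0];	/* head pointer, e.g. for args->extensions */
}

The same chain format is accepted wherever an extensions field is walked through exec_queue_user_extensions(), i.e. both the create and set_property ioctls below.
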
@@ -440,9 +440,9 @@ find_hw_engine(struct xe_device *xe,
                               eci.engine_instance, true);
 }
 
-static u32 bind_engine_logical_mask(struct xe_device *xe, struct xe_gt *gt,
-                                   struct drm_xe_engine_class_instance *eci,
-                                   u16 width, u16 num_placements)
+static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
+                                       struct drm_xe_engine_class_instance *eci,
+                                       u16 width, u16 num_placements)
 {
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
@@ -520,19 +520,19 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
        return return_mask;
 }
 
-int xe_engine_create_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file)
+int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
-       struct drm_xe_engine_create *args = data;
+       struct drm_xe_exec_queue_create *args = data;
        struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_xe_engine_class_instance __user *user_eci =
                u64_to_user_ptr(args->instances);
        struct xe_hw_engine *hwe;
        struct xe_vm *vm, *migrate_vm;
        struct xe_gt *gt;
-       struct xe_engine *e = NULL;
+       struct xe_exec_queue *q = NULL;
        u32 logical_mask;
        u32 id;
        u32 len;
@@ -557,15 +557,15 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
 
        if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
                for_each_gt(gt, xe, id) {
-                       struct xe_engine *new;
+                       struct xe_exec_queue *new;
 
                        if (xe_gt_is_media_type(gt))
                                continue;
 
                        eci[0].gt_id = gt->info.id;
-                       logical_mask = bind_engine_logical_mask(xe, gt, eci,
-                                                               args->width,
-                                                               args->num_placements);
+                       logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
+                                                                   args->width,
+                                                                   args->num_placements);
                        if (XE_IOCTL_DBG(xe, !logical_mask))
                                return -EINVAL;
 
@@ -577,28 +577,28 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
                        xe_device_mem_access_get(xe);
 
                        migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
-                       new = xe_engine_create(xe, migrate_vm, logical_mask,
-                                              args->width, hwe,
-                                              ENGINE_FLAG_PERSISTENT |
-                                              ENGINE_FLAG_VM |
-                                              (id ?
-                                              ENGINE_FLAG_BIND_ENGINE_CHILD :
-                                              0));
+                       new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
+                                                  args->width, hwe,
+                                                  EXEC_QUEUE_FLAG_PERSISTENT |
+                                                  EXEC_QUEUE_FLAG_VM |
+                                                  (id ?
+                                                   EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
+                                                   0));
 
                        xe_device_mem_access_put(xe); /* now held by engine */
 
                        xe_vm_put(migrate_vm);
                        if (IS_ERR(new)) {
                                err = PTR_ERR(new);
-                               if (e)
-                                       goto put_engine;
+                               if (q)
+                                       goto put_exec_queue;
                                return err;
                        }
                        if (id == 0)
-                               e = new;
+                               q = new;
                        else
                                list_add_tail(&new->multi_gt_list,
-                                             &e->multi_gt_link);
+                                             &q->multi_gt_link);
                }
        } else {
                gt = xe_device_get_gt(xe, eci[0].gt_id);
@@ -628,223 +628,223 @@ int xe_engine_create_ioctl(struct drm_device *dev, void *data,
                        return -ENOENT;
                }
 
-               e = xe_engine_create(xe, vm, logical_mask,
-                                    args->width, hwe,
-                                    xe_vm_no_dma_fences(vm) ? 0 :
-                                    ENGINE_FLAG_PERSISTENT);
+               q = xe_exec_queue_create(xe, vm, logical_mask,
+                                        args->width, hwe,
+                                        xe_vm_no_dma_fences(vm) ? 0 :
+                                        EXEC_QUEUE_FLAG_PERSISTENT);
                up_read(&vm->lock);
                xe_vm_put(vm);
-               if (IS_ERR(e))
-                       return PTR_ERR(e);
+               if (IS_ERR(q))
+                       return PTR_ERR(q);
        }
 
        if (args->extensions) {
-               err = engine_user_extensions(xe, e, args->extensions, 0, true);
+               err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
                if (XE_IOCTL_DBG(xe, err))
-                       goto put_engine;
+                       goto put_exec_queue;
        }
 
-       if (XE_IOCTL_DBG(xe, e->vm && xe_vm_in_compute_mode(e->vm) !=
-                        !!(e->flags & ENGINE_FLAG_COMPUTE_MODE))) {
+       if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
+                        !!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
                err = -EOPNOTSUPP;
-               goto put_engine;
+               goto put_exec_queue;
        }
 
-       e->persistent.xef = xef;
+       q->persistent.xef = xef;
 
-       mutex_lock(&xef->engine.lock);
-       err = xa_alloc(&xef->engine.xa, &id, e, xa_limit_32b, GFP_KERNEL);
-       mutex_unlock(&xef->engine.lock);
+       mutex_lock(&xef->exec_queue.lock);
+       err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
+       mutex_unlock(&xef->exec_queue.lock);
        if (err)
-               goto put_engine;
+               goto put_exec_queue;
 
-       args->engine_id = id;
+       args->exec_queue_id = id;
 
        return 0;
 
-put_engine:
-       xe_engine_kill(e);
-       xe_engine_put(e);
+put_exec_queue:
+       xe_exec_queue_kill(q);
+       xe_exec_queue_put(q);
        return err;
 }
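
For completeness, a hedged userspace sketch of the renamed create path for a single placement on a copy engine. The DRM_IOCTL_XE_EXEC_QUEUE_CREATE request name and the vm_id member are assumed from the uapi header updated elsewhere in this series rather than visible in this hunk, and error handling is trimmed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Illustrative only: create one exec queue on copy engine 0 of GT 0. */
static int create_copy_queue(int fd, uint32_t vm_id, uint32_t *exec_queue_id)
{
	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_COPY,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create args = {
		.width = 1,			/* one batch buffer per exec */
		.num_placements = 1,		/* single placement, no virtual engine */
		.vm_id = vm_id,			/* member name assumed, not shown in this hunk */
		.instances = (uintptr_t)&eci,	/* width * num_placements entries */
	};

	/* Request name assumed from the uapi rename in this series. */
	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &args))
		return -1;

	*exec_queue_id = args.exec_queue_id;	/* id allocated by the xarray above */
	return 0;
}

Passing eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND instead takes the multi-GT branch above and creates one bind queue per non-media GT, linked through multi_gt_list.
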
 
-int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file)
+int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
-       struct drm_xe_engine_get_property *args = data;
-       struct xe_engine *e;
+       struct drm_xe_exec_queue_get_property *args = data;
+       struct xe_exec_queue *q;
        int ret;
 
        if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;
 
-       e = xe_engine_lookup(xef, args->engine_id);
-       if (XE_IOCTL_DBG(xe, !e))
+       q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+       if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;
 
        switch (args->property) {
-       case XE_ENGINE_GET_PROPERTY_BAN:
-               args->value = !!(e->flags & ENGINE_FLAG_BANNED);
+       case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
+               args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }
 
-       xe_engine_put(e);
+       xe_exec_queue_put(q);
 
        return ret;
 }
 
-static void engine_kill_compute(struct xe_engine *e)
+static void exec_queue_kill_compute(struct xe_exec_queue *q)
 {
-       if (!xe_vm_in_compute_mode(e->vm))
+       if (!xe_vm_in_compute_mode(q->vm))
                return;
 
-       down_write(&e->vm->lock);
-       list_del(&e->compute.link);
-       --e->vm->preempt.num_engines;
-       if (e->compute.pfence) {
-               dma_fence_enable_sw_signaling(e->compute.pfence);
-               dma_fence_put(e->compute.pfence);
-               e->compute.pfence = NULL;
+       down_write(&q->vm->lock);
+       list_del(&q->compute.link);
+       --q->vm->preempt.num_exec_queues;
+       if (q->compute.pfence) {
+               dma_fence_enable_sw_signaling(q->compute.pfence);
+               dma_fence_put(q->compute.pfence);
+               q->compute.pfence = NULL;
        }
-       up_write(&e->vm->lock);
+       up_write(&q->vm->lock);
 }
 
 /**
- * xe_engine_is_lr() - Whether an engine is long-running
- * @e: The engine
+ * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
+ * @q: The exec_queue
  *
- * Return: True if the engine is long-running, false otherwise.
+ * Return: True if the exec_queue is long-running, false otherwise.
  */
-bool xe_engine_is_lr(struct xe_engine *e)
+bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
 {
-       return e->vm && xe_vm_no_dma_fences(e->vm) &&
-               !(e->flags & ENGINE_FLAG_VM);
+       return q->vm && xe_vm_no_dma_fences(q->vm) &&
+               !(q->flags & EXEC_QUEUE_FLAG_VM);
 }
 
-static s32 xe_engine_num_job_inflight(struct xe_engine *e)
+static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
 {
-       return e->lrc->fence_ctx.next_seqno - xe_lrc_seqno(e->lrc) - 1;
+       return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
 }
 
 /**
- * xe_engine_ring_full() - Whether an engine's ring is full
- * @e: The engine
+ * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
+ * @q: The exec_queue
  *
- * Return: True if the engine's ring is full, false otherwise.
+ * Return: True if the exec_queue's ring is full, false otherwise.
  */
-bool xe_engine_ring_full(struct xe_engine *e)
+bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
 {
-       struct xe_lrc *lrc = e->lrc;
+       struct xe_lrc *lrc = q->lrc;
        s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
 
-       return xe_engine_num_job_inflight(e) >= max_job;
+       return xe_exec_queue_num_job_inflight(q) >= max_job;
 }
 
 /**
- * xe_engine_is_idle() - Whether an engine is idle.
- * @engine: The engine
+ * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
+ * @q: The exec_queue
  *
  * FIXME: Need to determine what to use as the short-lived
- * timeline lock for the engines, so that the return value
+ * timeline lock for the exec_queues, so that the return value
  * of this function becomes more than just an advisory
  * snapshot in time. The timeline lock must protect the
- * seqno from racing submissions on the same engine.
+ * seqno from racing submissions on the same exec_queue.
  * Typically vm->resv, but user-created timeline locks use the migrate vm
  * and never grabs the migrate vm->resv so we have a race there.
  *
- * Return: True if the engine is idle, false otherwise.
+ * Return: True if the exec_queue is idle, false otherwise.
  */
-bool xe_engine_is_idle(struct xe_engine *engine)
+bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
 {
-       if (XE_WARN_ON(xe_engine_is_parallel(engine)))
+       if (XE_WARN_ON(xe_exec_queue_is_parallel(q)))
                return false;
 
-       return xe_lrc_seqno(&engine->lrc[0]) ==
-               engine->lrc[0].fence_ctx.next_seqno - 1;
+       return xe_lrc_seqno(&q->lrc[0]) ==
+               q->lrc[0].fence_ctx.next_seqno - 1;
 }
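
Both helpers above rely on the same seqno bookkeeping: fence_ctx.next_seqno is the number the next job will take and xe_lrc_seqno() is the last signalled one, so their difference minus one is the count of jobs still in flight. A tiny worked sketch with made-up numbers (types and values are illustrative, not from this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the seqno arithmetic above; all values made up. */
static void seqno_example(void)
{
	uint32_t next_seqno = 10;	/* lrc->fence_ctx.next_seqno */
	uint32_t done_seqno = 6;	/* xe_lrc_seqno(lrc) */

	int32_t inflight = next_seqno - done_seqno - 1;	/* 3 jobs outstanding */
	bool idle = done_seqno == next_seqno - 1;	/* false until they retire */

	/* ring reported full once inflight >= lrc->ring.size / MAX_JOB_SIZE_BYTES */
	(void)inflight;
	(void)idle;
}
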
 
-void xe_engine_kill(struct xe_engine *e)
+void xe_exec_queue_kill(struct xe_exec_queue *q)
 {
-       struct xe_engine *engine = e, *next;
+       struct xe_exec_queue *eq = q, *next;
 
-       list_for_each_entry_safe(engine, next, &engine->multi_gt_list,
+       list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
                                 multi_gt_link) {
-               e->ops->kill(engine);
-               engine_kill_compute(engine);
+               q->ops->kill(eq);
+               exec_queue_kill_compute(eq);
        }
 
-       e->ops->kill(e);
-       engine_kill_compute(e);
+       q->ops->kill(q);
+       exec_queue_kill_compute(q);
 }
 
-int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file)
+int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
-       struct drm_xe_engine_destroy *args = data;
-       struct xe_engine *e;
+       struct drm_xe_exec_queue_destroy *args = data;
+       struct xe_exec_queue *q;
 
        if (XE_IOCTL_DBG(xe, args->pad) ||
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;
 
-       mutex_lock(&xef->engine.lock);
-       e = xa_erase(&xef->engine.xa, args->engine_id);
-       mutex_unlock(&xef->engine.lock);
-       if (XE_IOCTL_DBG(xe, !e))
+       mutex_lock(&xef->exec_queue.lock);
+       q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+       mutex_unlock(&xef->exec_queue.lock);
+       if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;
 
-       if (!(e->flags & ENGINE_FLAG_PERSISTENT))
-               xe_engine_kill(e);
+       if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
+               xe_exec_queue_kill(q);
        else
-               xe_device_add_persistent_engines(xe, e);
+               xe_device_add_persistent_exec_queues(xe, q);
 
-       trace_xe_engine_close(e);
-       xe_engine_put(e);
+       trace_xe_exec_queue_close(q);
+       xe_exec_queue_put(q);
 
        return 0;
 }
 
-int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file)
+int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file)
 {
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
-       struct drm_xe_engine_set_property *args = data;
-       struct xe_engine *e;
+       struct drm_xe_exec_queue_set_property *args = data;
+       struct xe_exec_queue *q;
        int ret;
        u32 idx;
 
        if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;
 
-       e = xe_engine_lookup(xef, args->engine_id);
-       if (XE_IOCTL_DBG(xe, !e))
+       q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+       if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;
 
        if (XE_IOCTL_DBG(xe, args->property >=
-                        ARRAY_SIZE(engine_set_property_funcs))) {
+                        ARRAY_SIZE(exec_queue_set_property_funcs))) {
                ret = -EINVAL;
                goto out;
        }
 
        idx = array_index_nospec(args->property,
-                                ARRAY_SIZE(engine_set_property_funcs));
-       ret = engine_set_property_funcs[idx](xe, e, args->value, false);
+                                ARRAY_SIZE(exec_queue_set_property_funcs));
+       ret = exec_queue_set_property_funcs[idx](xe, q, args->value, false);
        if (XE_IOCTL_DBG(xe, ret))
                goto out;
 
        if (args->extensions)
-               ret = engine_user_extensions(xe, e, args->extensions, 0,
-                                            false);
+               ret = exec_queue_user_extensions(xe, q, args->extensions, 0,
+                                                false);
 out:
-       xe_engine_put(e);
+       xe_exec_queue_put(q);
 
        return ret;
 }
index 3017e4fe308d25e91f324d44b47694d899ffa0db..94a6abee38a6069d73f6a38a8776aaa7b627557f 100644 (file)
@@ -3,10 +3,10 @@
  * Copyright © 2021 Intel Corporation
  */
 
-#ifndef _XE_ENGINE_H_
-#define _XE_ENGINE_H_
+#ifndef _XE_EXEC_QUEUE_H_
+#define _XE_EXEC_QUEUE_H_
 
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_vm_types.h"
 
 struct drm_device;
@@ -14,50 +14,50 @@ struct drm_file;
 struct xe_device;
 struct xe_file;
 
-struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
-                                  u32 logical_mask, u16 width,
-                                  struct xe_hw_engine *hw_engine, u32 flags);
-struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
-                                        struct xe_vm *vm,
-                                        enum xe_engine_class class, u32 flags);
+struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
+                                          u32 logical_mask, u16 width,
+                                          struct xe_hw_engine *hw_engine, u32 flags);
+struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
+                                                struct xe_vm *vm,
+                                                enum xe_engine_class class, u32 flags);
 
-void xe_engine_fini(struct xe_engine *e);
-void xe_engine_destroy(struct kref *ref);
+void xe_exec_queue_fini(struct xe_exec_queue *q);
+void xe_exec_queue_destroy(struct kref *ref);
 
-struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id);
+struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
 
-static inline struct xe_engine *xe_engine_get(struct xe_engine *engine)
+static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
 {
-       kref_get(&engine->refcount);
-       return engine;
+       kref_get(&q->refcount);
+       return q;
 }
 
-static inline void xe_engine_put(struct xe_engine *engine)
+static inline void xe_exec_queue_put(struct xe_exec_queue *q)
 {
-       kref_put(&engine->refcount, xe_engine_destroy);
+       kref_put(&q->refcount, xe_exec_queue_destroy);
 }
 
-static inline bool xe_engine_is_parallel(struct xe_engine *engine)
+static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
 {
-       return engine->width > 1;
+       return q->width > 1;
 }
 
-bool xe_engine_is_lr(struct xe_engine *e);
+bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
 
-bool xe_engine_ring_full(struct xe_engine *e);
+bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
 
-bool xe_engine_is_idle(struct xe_engine *engine);
+bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
 
-void xe_engine_kill(struct xe_engine *e);
+void xe_exec_queue_kill(struct xe_exec_queue *q);
 
-int xe_engine_create_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file);
-int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file);
-int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file);
-int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file);
-enum xe_engine_priority xe_engine_device_get_max_priority(struct xe_device *xe);
+int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
+int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file);
+int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file);
+int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file);
+enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
 
 #endif
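
The helpers declared here follow the usual kref pattern: xe_exec_queue_lookup() hands back a referenced queue (or NULL), and every successful lookup or explicit xe_exec_queue_get() must be balanced by xe_exec_queue_put(), exactly as the ioctls in xe_exec_queue.c do. A minimal sketch of a hypothetical in-kernel caller, not part of this patch:

#include "xe_exec_queue.h"

/* Hypothetical caller; mirrors the lookup/put pairing used by the ioctls. */
static int query_queue_width(struct xe_file *xef, u32 id, u16 *width)
{
	struct xe_exec_queue *q = xe_exec_queue_lookup(xef, id);

	if (!q)
		return -ENOENT;		/* lookup failed, no reference taken */

	*width = q->width;		/* safe while the lookup reference is held */

	xe_exec_queue_put(q);		/* final put ends in xe_exec_queue_destroy() */
	return 0;
}
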
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
new file mode 100644 (file)
index 0000000..4506289
--- /dev/null
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_EXEC_QUEUE_TYPES_H_
+#define _XE_EXEC_QUEUE_TYPES_H_
+
+#include <linux/kref.h>
+
+#include <drm/gpu_scheduler.h>
+
+#include "xe_gpu_scheduler_types.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_fence_types.h"
+#include "xe_lrc_types.h"
+
+struct xe_execlist_exec_queue;
+struct xe_gt;
+struct xe_guc_exec_queue;
+struct xe_hw_engine;
+struct xe_vm;
+
+enum xe_exec_queue_priority {
+       XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
+       XE_EXEC_QUEUE_PRIORITY_LOW = 0,
+       XE_EXEC_QUEUE_PRIORITY_NORMAL,
+       XE_EXEC_QUEUE_PRIORITY_HIGH,
+       XE_EXEC_QUEUE_PRIORITY_KERNEL,
+
+       XE_EXEC_QUEUE_PRIORITY_COUNT
+};
+
+/**
+ * struct xe_exec_queue - Execution queue
+ *
+ * Contains all state necessary for submissions. Can either be a user object or
+ * a kernel object.
+ */
+struct xe_exec_queue {
+       /** @gt: graphics tile this exec queue can submit to */
+       struct xe_gt *gt;
+       /**
+        * @hwe: A hardware engine of the same class. May (physical engine) or
+        * may not (virtual engine) be where the job actually ends up running.
+        * Should never really be used for submissions.
+        */
+       struct xe_hw_engine *hwe;
+       /** @refcount: ref count of this exec queue */
+       struct kref refcount;
+       /** @vm: VM (address space) for this exec queue */
+       struct xe_vm *vm;
+       /** @class: class of this exec queue */
+       enum xe_engine_class class;
+       /** @priority: priority of this exec queue */
+       enum xe_exec_queue_priority priority;
+       /**
+        * @logical_mask: logical mask of where job submitted to exec queue can run
+        */
+       u32 logical_mask;
+       /** @name: name of this exec queue */
+       char name[MAX_FENCE_NAME_LEN];
+       /** @width: width (number of BBs submitted per exec) of this exec queue */
+       u16 width;
+       /** @fence_irq: fence IRQ used to signal job completion */
+       struct xe_hw_fence_irq *fence_irq;
+
+#define EXEC_QUEUE_FLAG_BANNED         BIT(0)
+#define EXEC_QUEUE_FLAG_KERNEL         BIT(1)
+#define EXEC_QUEUE_FLAG_PERSISTENT             BIT(2)
+#define EXEC_QUEUE_FLAG_COMPUTE_MODE   BIT(3)
+/* Caller needs to hold rpm ref when creating an exec queue with EXEC_QUEUE_FLAG_VM */
+#define EXEC_QUEUE_FLAG_VM                     BIT(4)
+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD      BIT(5)
+#define EXEC_QUEUE_FLAG_WA                     BIT(6)
+
+       /**
+        * @flags: flags for this exec queue, should be statically set up aside
+        * from the ban bit
+        */
+       unsigned long flags;
+
+       union {
+               /** @multi_gt_list: list head for VM bind engines if multi-GT */
+               struct list_head multi_gt_list;
+               /** @multi_gt_link: link for VM bind engines if multi-GT */
+               struct list_head multi_gt_link;
+       };
+
+       union {
+               /** @execlist: execlist backend specific state for exec queue */
+               struct xe_execlist_exec_queue *execlist;
+               /** @guc: GuC backend specific state for exec queue */
+               struct xe_guc_exec_queue *guc;
+       };
+
+       /**
+        * @persistent: persistent exec queue state
+        */
+       struct {
+               /** @xef: file which this exec queue belongs to */
+               struct xe_file *xef;
+               /** @link: link in list of persistent exec queues */
+               struct list_head link;
+       } persistent;
+
+       union {
+               /**
+                * @parallel: parallel submission state
+                */
+               struct {
+                       /** @composite_fence_ctx: context composite fence */
+                       u64 composite_fence_ctx;
+                       /** @composite_fence_seqno: seqno for composite fence */
+                       u32 composite_fence_seqno;
+               } parallel;
+               /**
+                * @bind: bind submission state
+                */
+               struct {
+                       /** @fence_ctx: context bind fence */
+                       u64 fence_ctx;
+                       /** @fence_seqno: seqno for bind fence */
+                       u32 fence_seqno;
+               } bind;
+       };
+
+       /** @sched_props: scheduling properties */
+       struct {
+               /** @timeslice_us: timeslice period in micro-seconds */
+               u32 timeslice_us;
+               /** @preempt_timeout_us: preemption timeout in micro-seconds */
+               u32 preempt_timeout_us;
+       } sched_props;
+
+       /** @compute: compute exec queue state */
+       struct {
+               /** @pfence: preemption fence */
+               struct dma_fence *pfence;
+               /** @context: preemption fence context */
+               u64 context;
+               /** @seqno: preemption fence seqno */
+               u32 seqno;
+               /** @link: link into VM's list of exec queues */
+               struct list_head link;
+               /** @lock: preemption fences lock */
+               spinlock_t lock;
+       } compute;
+
+       /** @usm: unified shared memory state */
+       struct {
+               /** @acc_trigger: access counter trigger */
+               u32 acc_trigger;
+               /** @acc_notify: access counter notify */
+               u32 acc_notify;
+               /** @acc_granularity: access counter granularity */
+               u32 acc_granularity;
+       } usm;
+
+       /** @ops: submission backend exec queue operations */
+       const struct xe_exec_queue_ops *ops;
+
+       /** @ring_ops: ring operations for this exec queue */
+       const struct xe_ring_ops *ring_ops;
+       /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
+       struct drm_sched_entity *entity;
+       /** @lrc: logical ring context for this exec queue */
+       struct xe_lrc lrc[];
+};
+
+/**
+ * struct xe_exec_queue_ops - Submission backend exec queue operations
+ */
+struct xe_exec_queue_ops {
+       /** @init: Initialize exec queue for submission backend */
+       int (*init)(struct xe_exec_queue *q);
+       /** @kill: Kill inflight submissions for backend */
+       void (*kill)(struct xe_exec_queue *q);
+       /** @fini: Fini exec queue for submission backend */
+       void (*fini)(struct xe_exec_queue *q);
+       /** @set_priority: Set priority for exec queue */
+       int (*set_priority)(struct xe_exec_queue *q,
+                           enum xe_exec_queue_priority priority);
+       /** @set_timeslice: Set timeslice for exec queue */
+       int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
+       /** @set_preempt_timeout: Set preemption timeout for exec queue */
+       int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
+       /** @set_job_timeout: Set job timeout for exec queue */
+       int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
+       /**
+        * @suspend: Suspend exec queue from executing, allowed to be called
+        * multiple times in a row before resume, with the caveat that
+        * suspend_wait must return before suspend is called again.
+        */
+       int (*suspend)(struct xe_exec_queue *q);
+       /**
+        * @suspend_wait: Wait for an exec queue to suspend executing, should be
+        * called after suspend.
+        */
+       void (*suspend_wait)(struct xe_exec_queue *q);
+       /**
+        * @resume: Resume exec queue execution, exec queue must be in a suspended
+        * state and dma fence returned from most recent suspend call must be
+        * signalled when this function is called.
+        */
+       void (*resume)(struct xe_exec_queue *q);
+};
+
+#endif
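
The kerneldoc above encodes an ordering contract for backends: suspend() may only be re-issued once the previous suspend_wait() has returned, and resume() is legal only when the queue is suspended and the fence from the latest suspend has signalled. A hedged sketch of a hypothetical caller driving that sequence through the ops table (not part of this patch):

#include "xe_exec_queue_types.h"

/* Hypothetical caller; illustrates the ordering documented in the ops kerneldoc. */
static void quiesce_and_restart(struct xe_exec_queue *q)
{
	if (q->ops->suspend(q))		/* ask the backend to stop executing */
		return;

	q->ops->suspend_wait(q);	/* must return before suspend() is called again */

	/* ... queue is quiesced here, e.g. while its state is rebuilt ... */

	q->ops->resume(q);		/* only once the suspend fence has signalled */
}
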
index 5b6748e1a37fa109fcd3582dc27ac928fca97211..3b8be55fe19c95d8368b0247385b58766b372e8b 100644 (file)
@@ -91,7 +91,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
 }
 
 static void __xe_execlist_port_start(struct xe_execlist_port *port,
-                                    struct xe_execlist_engine *exl)
+                                    struct xe_execlist_exec_queue *exl)
 {
        struct xe_device *xe = gt_to_xe(port->hwe->gt);
        int max_ctx = FIELD_MAX(GEN11_SW_CTX_ID);
@@ -109,7 +109,7 @@ static void __xe_execlist_port_start(struct xe_execlist_port *port,
                        port->last_ctx_id = 1;
        }
 
-       __start_lrc(port->hwe, exl->engine->lrc, port->last_ctx_id);
+       __start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
        port->running_exl = exl;
        exl->has_run = true;
 }
@@ -128,16 +128,16 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
        port->running_exl = NULL;
 }
 
-static bool xe_execlist_is_idle(struct xe_execlist_engine *exl)
+static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
 {
-       struct xe_lrc *lrc = exl->engine->lrc;
+       struct xe_lrc *lrc = exl->q->lrc;
 
        return lrc->ring.tail == lrc->ring.old_tail;
 }
 
 static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
 {
-       struct xe_execlist_engine *exl = NULL;
+       struct xe_execlist_exec_queue *exl = NULL;
        int i;
 
        xe_execlist_port_assert_held(port);
@@ -145,12 +145,12 @@ static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
        for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
                while (!list_empty(&port->active[i])) {
                        exl = list_first_entry(&port->active[i],
-                                              struct xe_execlist_engine,
+                                              struct xe_execlist_exec_queue,
                                               active_link);
                        list_del(&exl->active_link);
 
                        if (xe_execlist_is_idle(exl)) {
-                               exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
+                               exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
                                continue;
                        }
 
@@ -198,7 +198,7 @@ static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
 }
 
 static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
-                                        enum xe_engine_priority priority)
+                                        enum xe_exec_queue_priority priority)
 {
        xe_execlist_port_assert_held(port);
 
@@ -208,25 +208,25 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
        __xe_execlist_port_start_next_active(port);
 }
 
-static void xe_execlist_make_active(struct xe_execlist_engine *exl)
+static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
 {
        struct xe_execlist_port *port = exl->port;
-       enum xe_engine_priority priority = exl->active_priority;
+       enum xe_exec_queue_priority priority = exl->active_priority;
 
-       XE_WARN_ON(priority == XE_ENGINE_PRIORITY_UNSET);
+       XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
        XE_WARN_ON(priority < 0);
        XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));
 
        spin_lock_irq(&port->lock);
 
        if (exl->active_priority != priority &&
-           exl->active_priority != XE_ENGINE_PRIORITY_UNSET) {
+           exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET) {
                /* Priority changed, move it to the right list */
                list_del(&exl->active_link);
-               exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
+               exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
        }
 
-       if (exl->active_priority == XE_ENGINE_PRIORITY_UNSET) {
+       if (exl->active_priority == XE_EXEC_QUEUE_PRIORITY_UNSET) {
                exl->active_priority = priority;
                list_add_tail(&exl->active_link, &port->active[priority]);
        }
@@ -293,10 +293,10 @@ static struct dma_fence *
 execlist_run_job(struct drm_sched_job *drm_job)
 {
        struct xe_sched_job *job = to_xe_sched_job(drm_job);
-       struct xe_engine *e = job->engine;
-       struct xe_execlist_engine *exl = job->engine->execlist;
+       struct xe_exec_queue *q = job->q;
+       struct xe_execlist_exec_queue *exl = job->q->execlist;
 
-       e->ring_ops->emit_job(job);
+       q->ring_ops->emit_job(job);
        xe_execlist_make_active(exl);
 
        return dma_fence_get(job->fence);
@@ -314,11 +314,11 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
        .free_job = execlist_job_free,
 };
 
-static int execlist_engine_init(struct xe_engine *e)
+static int execlist_exec_queue_init(struct xe_exec_queue *q)
 {
        struct drm_gpu_scheduler *sched;
-       struct xe_execlist_engine *exl;
-       struct xe_device *xe = gt_to_xe(e->gt);
+       struct xe_execlist_exec_queue *exl;
+       struct xe_device *xe = gt_to_xe(q->gt);
        int err;
 
        XE_WARN_ON(xe_device_guc_submission_enabled(xe));
@@ -329,13 +329,13 @@ static int execlist_engine_init(struct xe_engine *e)
        if (!exl)
                return -ENOMEM;
 
-       exl->engine = e;
+       exl->q = q;
 
        err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
-                            e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
+                            q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
                             XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
-                            NULL, NULL, e->hwe->name,
-                            gt_to_xe(e->gt)->drm.dev);
+                            NULL, NULL, q->hwe->name,
+                            gt_to_xe(q->gt)->drm.dev);
        if (err)
                goto err_free;
 
@@ -344,30 +344,30 @@ static int execlist_engine_init(struct xe_engine *e)
        if (err)
                goto err_sched;
 
-       exl->port = e->hwe->exl_port;
+       exl->port = q->hwe->exl_port;
        exl->has_run = false;
-       exl->active_priority = XE_ENGINE_PRIORITY_UNSET;
-       e->execlist = exl;
-       e->entity = &exl->entity;
+       exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
+       q->execlist = exl;
+       q->entity = &exl->entity;
 
-       switch (e->class) {
+       switch (q->class) {
        case XE_ENGINE_CLASS_RENDER:
-               sprintf(e->name, "rcs%d", ffs(e->logical_mask) - 1);
+               sprintf(q->name, "rcs%d", ffs(q->logical_mask) - 1);
                break;
        case XE_ENGINE_CLASS_VIDEO_DECODE:
-               sprintf(e->name, "vcs%d", ffs(e->logical_mask) - 1);
+               sprintf(q->name, "vcs%d", ffs(q->logical_mask) - 1);
                break;
        case XE_ENGINE_CLASS_VIDEO_ENHANCE:
-               sprintf(e->name, "vecs%d", ffs(e->logical_mask) - 1);
+               sprintf(q->name, "vecs%d", ffs(q->logical_mask) - 1);
                break;
        case XE_ENGINE_CLASS_COPY:
-               sprintf(e->name, "bcs%d", ffs(e->logical_mask) - 1);
+               sprintf(q->name, "bcs%d", ffs(q->logical_mask) - 1);
                break;
        case XE_ENGINE_CLASS_COMPUTE:
-               sprintf(e->name, "ccs%d", ffs(e->logical_mask) - 1);
+               sprintf(q->name, "ccs%d", ffs(q->logical_mask) - 1);
                break;
        default:
-               XE_WARN_ON(e->class);
+               XE_WARN_ON(q->class);
        }
 
        return 0;
@@ -379,96 +379,96 @@ err_free:
        return err;
 }
 
-static void execlist_engine_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini_async(struct work_struct *w)
 {
-       struct xe_execlist_engine *ee =
-               container_of(w, struct xe_execlist_engine, fini_async);
-       struct xe_engine *e = ee->engine;
-       struct xe_execlist_engine *exl = e->execlist;
+       struct xe_execlist_exec_queue *ee =
+               container_of(w, struct xe_execlist_exec_queue, fini_async);
+       struct xe_exec_queue *q = ee->q;
+       struct xe_execlist_exec_queue *exl = q->execlist;
        unsigned long flags;
 
-       XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
+       XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(q->gt)));
 
        spin_lock_irqsave(&exl->port->lock, flags);
-       if (WARN_ON(exl->active_priority != XE_ENGINE_PRIORITY_UNSET))
+       if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
                list_del(&exl->active_link);
        spin_unlock_irqrestore(&exl->port->lock, flags);
 
-       if (e->flags & ENGINE_FLAG_PERSISTENT)
-               xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
+       if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
+               xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
        drm_sched_entity_fini(&exl->entity);
        drm_sched_fini(&exl->sched);
        kfree(exl);
 
-       xe_engine_fini(e);
+       xe_exec_queue_fini(q);
 }
 
-static void execlist_engine_kill(struct xe_engine *e)
+static void execlist_exec_queue_kill(struct xe_exec_queue *q)
 {
        /* NIY */
 }
 
-static void execlist_engine_fini(struct xe_engine *e)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
 {
-       INIT_WORK(&e->execlist->fini_async, execlist_engine_fini_async);
-       queue_work(system_unbound_wq, &e->execlist->fini_async);
+       INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
+       queue_work(system_unbound_wq, &q->execlist->fini_async);
 }
 
-static int execlist_engine_set_priority(struct xe_engine *e,
-                                       enum xe_engine_priority priority)
+static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
+                                           enum xe_exec_queue_priority priority)
 {
        /* NIY */
        return 0;
 }
 
-static int execlist_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
+static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
 {
        /* NIY */
        return 0;
 }
 
-static int execlist_engine_set_preempt_timeout(struct xe_engine *e,
-                                              u32 preempt_timeout_us)
+static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
+                                                  u32 preempt_timeout_us)
 {
        /* NIY */
        return 0;
 }
 
-static int execlist_engine_set_job_timeout(struct xe_engine *e,
-                                          u32 job_timeout_ms)
+static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
+                                              u32 job_timeout_ms)
 {
        /* NIY */
        return 0;
 }
 
-static int execlist_engine_suspend(struct xe_engine *e)
+static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
 {
        /* NIY */
        return 0;
 }
 
-static void execlist_engine_suspend_wait(struct xe_engine *e)
+static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
 
 {
        /* NIY */
 }
 
-static void execlist_engine_resume(struct xe_engine *e)
+static void execlist_exec_queue_resume(struct xe_exec_queue *q)
 {
        /* NIY */
 }
 
-static const struct xe_engine_ops execlist_engine_ops = {
-       .init = execlist_engine_init,
-       .kill = execlist_engine_kill,
-       .fini = execlist_engine_fini,
-       .set_priority = execlist_engine_set_priority,
-       .set_timeslice = execlist_engine_set_timeslice,
-       .set_preempt_timeout = execlist_engine_set_preempt_timeout,
-       .set_job_timeout = execlist_engine_set_job_timeout,
-       .suspend = execlist_engine_suspend,
-       .suspend_wait = execlist_engine_suspend_wait,
-       .resume = execlist_engine_resume,
+static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
+       .init = execlist_exec_queue_init,
+       .kill = execlist_exec_queue_kill,
+       .fini = execlist_exec_queue_fini,
+       .set_priority = execlist_exec_queue_set_priority,
+       .set_timeslice = execlist_exec_queue_set_timeslice,
+       .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
+       .set_job_timeout = execlist_exec_queue_set_job_timeout,
+       .suspend = execlist_exec_queue_suspend,
+       .suspend_wait = execlist_exec_queue_suspend_wait,
+       .resume = execlist_exec_queue_resume,
 };
 
 int xe_execlist_init(struct xe_gt *gt)
@@ -477,7 +477,7 @@ int xe_execlist_init(struct xe_gt *gt)
        if (xe_device_guc_submission_enabled(gt_to_xe(gt)))
                return 0;
 
-       gt->engine_ops = &execlist_engine_ops;
+       gt->exec_queue_ops = &execlist_exec_queue_ops;
 
        return 0;
 }
index 9b1239b47292b3653b782242bcffbb4fe2e14418..f94bbf4c53e43fa5e637b2945fa6da6080ed8b16 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 
 struct xe_hw_engine;
-struct xe_execlist_engine;
+struct xe_execlist_exec_queue;
 
 struct xe_execlist_port {
        struct xe_hw_engine *hwe;
 
        spinlock_t lock;
 
-       struct list_head active[XE_ENGINE_PRIORITY_COUNT];
+       struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];
 
        u32 last_ctx_id;
 
-       struct xe_execlist_engine *running_exl;
+       struct xe_execlist_exec_queue *running_exl;
 
        struct timer_list irq_fail;
 };
 
-struct xe_execlist_engine {
-       struct xe_engine *engine;
+struct xe_execlist_exec_queue {
+       struct xe_exec_queue *q;
 
        struct drm_gpu_scheduler sched;
 
@@ -42,7 +42,7 @@ struct xe_execlist_engine {
 
        struct work_struct fini_async;
 
-       enum xe_engine_priority active_priority;
+       enum xe_exec_queue_priority active_priority;
        struct list_head active_link;
 };
 
index 543b085723c58a73494aced340c7fb4ec96fae9f..3077faa1e7923f5a163edc5a2592e0d5aa96c8c6 100644 (file)
@@ -26,7 +26,7 @@
 #include "xe_gt_sysfs.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_gt_topology.h"
-#include "xe_guc_engine_types.h"
+#include "xe_guc_exec_queue_types.h"
 #include "xe_hw_fence.h"
 #include "xe_irq.h"
 #include "xe_lrc.h"
@@ -81,7 +81,7 @@ static void gt_fini(struct drm_device *drm, void *arg)
 
 static void gt_reset_worker(struct work_struct *w);
 
-static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
+static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
 {
        struct xe_sched_job *job;
        struct xe_bb *bb;
@@ -94,7 +94,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
                return PTR_ERR(bb);
 
        batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-       job = xe_bb_create_wa_job(e, bb, batch_ofs);
+       job = xe_bb_create_wa_job(q, bb, batch_ofs);
        if (IS_ERR(job)) {
                xe_bb_free(bb, NULL);
                return PTR_ERR(job);
@@ -115,9 +115,9 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
        return 0;
 }
 
-static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
+static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 {
-       struct xe_reg_sr *sr = &e->hwe->reg_lrc;
+       struct xe_reg_sr *sr = &q->hwe->reg_lrc;
        struct xe_reg_sr_entry *entry;
        unsigned long reg;
        struct xe_sched_job *job;
@@ -143,7 +143,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
        }
 
        batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-       job = xe_bb_create_wa_job(e, bb, batch_ofs);
+       job = xe_bb_create_wa_job(q, bb, batch_ofs);
        if (IS_ERR(job)) {
                xe_bb_free(bb, NULL);
                return PTR_ERR(job);
@@ -173,7 +173,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
        int err = 0;
 
        for_each_hw_engine(hwe, gt, id) {
-               struct xe_engine *e, *nop_e;
+               struct xe_exec_queue *q, *nop_q;
                struct xe_vm *vm;
                void *default_lrc;
 
@@ -192,58 +192,58 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
                        return -ENOMEM;
 
                vm = xe_migrate_get_vm(tile->migrate);
-               e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
-                                    hwe, ENGINE_FLAG_WA);
-               if (IS_ERR(e)) {
-                       err = PTR_ERR(e);
-                       xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
-                                 hwe->name, e);
+               q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
+                                        hwe, EXEC_QUEUE_FLAG_WA);
+               if (IS_ERR(q)) {
+                       err = PTR_ERR(q);
+                       xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
+                                 hwe->name, q);
                        goto put_vm;
                }
 
                /* Prime golden LRC with known good state */
-               err = emit_wa_job(gt, e);
+               err = emit_wa_job(gt, q);
                if (err) {
                        xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
-                                 hwe->name, ERR_PTR(err), e->guc->id);
-                       goto put_engine;
+                                 hwe->name, ERR_PTR(err), q->guc->id);
+                       goto put_exec_queue;
                }
 
-               nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
-                                        1, hwe, ENGINE_FLAG_WA);
-               if (IS_ERR(nop_e)) {
-                       err = PTR_ERR(nop_e);
-                       xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
-                                 hwe->name, nop_e);
-                       goto put_engine;
+               nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
+                                            1, hwe, EXEC_QUEUE_FLAG_WA);
+               if (IS_ERR(nop_q)) {
+                       err = PTR_ERR(nop_q);
+                       xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
+                                 hwe->name, nop_q);
+                       goto put_exec_queue;
                }
 
                /* Switch to different LRC */
-               err = emit_nop_job(gt, nop_e);
+               err = emit_nop_job(gt, nop_q);
                if (err) {
                        xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
-                                 hwe->name, ERR_PTR(err), nop_e->guc->id);
-                       goto put_nop_e;
+                                 hwe->name, ERR_PTR(err), nop_q->guc->id);
+                       goto put_nop_q;
                }
 
                /* Reload golden LRC to record the effect of any indirect W/A */
-               err = emit_nop_job(gt, e);
+               err = emit_nop_job(gt, q);
                if (err) {
                        xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
-                                 hwe->name, ERR_PTR(err), e->guc->id);
-                       goto put_nop_e;
+                                 hwe->name, ERR_PTR(err), q->guc->id);
+                       goto put_nop_q;
                }
 
                xe_map_memcpy_from(xe, default_lrc,
-                                  &e->lrc[0].bo->vmap,
-                                  xe_lrc_pphwsp_offset(&e->lrc[0]),
+                                  &q->lrc[0].bo->vmap,
+                                  xe_lrc_pphwsp_offset(&q->lrc[0]),
                                   xe_lrc_size(xe, hwe->class));
 
                gt->default_lrc[hwe->class] = default_lrc;
-put_nop_e:
-               xe_engine_put(nop_e);
-put_engine:
-               xe_engine_put(e);
+put_nop_q:
+               xe_exec_queue_put(nop_q);
+put_exec_queue:
+               xe_exec_queue_put(q);
 put_vm:
                xe_vm_put(vm);
                if (err)
index 78a9fe9f0bd36f597d1fd21e2a08eed88cb50b05..c326932e53d799f2dc300993dd636a39b5d78644 100644 (file)
@@ -14,7 +14,7 @@
 #include "xe_sa_types.h"
 #include "xe_uc_types.h"
 
-struct xe_engine_ops;
+struct xe_exec_queue_ops;
 struct xe_migrate;
 struct xe_ring_ops;
 
@@ -269,8 +269,8 @@ struct xe_gt {
        /** @gtidle: idle properties of GT */
        struct xe_gt_idle gtidle;
 
-       /** @engine_ops: submission backend engine operations */
-       const struct xe_engine_ops *engine_ops;
+       /** @exec_queue_ops: submission backend exec queue operations */
+       const struct xe_exec_queue_ops *exec_queue_ops;
 
        /**
         * @ring_ops: ring operations for this hw engine (1 per engine class)
index a7da29be2e51421053c520ac9f46a55a627dec1b..7d1244df959df05ff65ab25cb080d9e6c232709c 100644 (file)
@@ -495,7 +495,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
                u8 gc;
 
                /*
-                * 1. Write all MMIO entries for this engine to the table. No
+                * 1. Write all MMIO entries for this exec queue to the table. No
                 * need to worry about fused-off engines and when there are
                 * entries in the regset: the reg_state_list has been zero'ed
                 * by xe_guc_ads_populate()
index fb1d63ffaee499e8f93bf6aae38b167189c945e6..59136b6a7c6f434ed10c90612667c3a7d9eb5d29 100644 (file)
@@ -888,11 +888,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
                ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
                break;
        case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
-               ret = xe_guc_engine_reset_handler(guc, payload, adj_len);
+               ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
                break;
        case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
-               ret = xe_guc_engine_reset_failure_handler(guc, payload,
-                                                         adj_len);
+               ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
+                                                             adj_len);
                break;
        case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
                /* Selftest only at the moment */
@@ -902,8 +902,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
                /* FIXME: Handle this */
                break;
        case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
-               ret = xe_guc_engine_memory_cat_error_handler(guc, payload,
-                                                            adj_len);
+               ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
+                                                                adj_len);
                break;
        case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
                ret = xe_guc_pagefault_handler(guc, payload, adj_len);
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_types.h b/drivers/gpu/drm/xe/xe_guc_engine_types.h
deleted file mode 100644 (file)
index 5565412..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_GUC_ENGINE_TYPES_H_
-#define _XE_GUC_ENGINE_TYPES_H_
-
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-
-#include "xe_gpu_scheduler_types.h"
-
-struct dma_fence;
-struct xe_engine;
-
-/**
- * struct xe_guc_engine - GuC specific state for an xe_engine
- */
-struct xe_guc_engine {
-       /** @engine: Backpointer to parent xe_engine */
-       struct xe_engine *engine;
-       /** @sched: GPU scheduler for this xe_engine */
-       struct xe_gpu_scheduler sched;
-       /** @entity: Scheduler entity for this xe_engine */
-       struct xe_sched_entity entity;
-       /**
-        * @static_msgs: Static messages for this xe_engine, used when a message
-        * needs to sent through the GPU scheduler but memory allocations are
-        * not allowed.
-        */
-#define MAX_STATIC_MSG_TYPE    3
-       struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
-       /** @lr_tdr: long running TDR worker */
-       struct work_struct lr_tdr;
-       /** @fini_async: do final fini async from this worker */
-       struct work_struct fini_async;
-       /** @resume_time: time of last resume */
-       u64 resume_time;
-       /** @state: GuC specific state for this xe_engine */
-       atomic_t state;
-       /** @wqi_head: work queue item tail */
-       u32 wqi_head;
-       /** @wqi_tail: work queue item tail */
-       u32 wqi_tail;
-       /** @id: GuC id for this xe_engine */
-       u16 id;
-       /** @suspend_wait: wait queue used to wait on pending suspends */
-       wait_queue_head_t suspend_wait;
-       /** @suspend_pending: a suspend of the engine is pending */
-       bool suspend_pending;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
new file mode 100644 (file)
index 0000000..4c39f01
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_EXEC_QUEUE_TYPES_H_
+#define _XE_GUC_EXEC_QUEUE_TYPES_H_
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "xe_gpu_scheduler_types.h"
+
+struct dma_fence;
+struct xe_exec_queue;
+
+/**
+ * struct xe_guc_exec_queue - GuC specific state for an xe_exec_queue
+ */
+struct xe_guc_exec_queue {
+       /** @q: Backpointer to parent xe_exec_queue */
+       struct xe_exec_queue *q;
+       /** @sched: GPU scheduler for this xe_exec_queue */
+       struct xe_gpu_scheduler sched;
+       /** @entity: Scheduler entity for this xe_exec_queue */
+       struct xe_sched_entity entity;
+       /**
+        * @static_msgs: Static messages for this xe_exec_queue, used when
+        * a message needs to be sent through the GPU scheduler but memory
+        * allocations are not allowed.
+        */
+#define MAX_STATIC_MSG_TYPE    3
+       struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
+       /** @lr_tdr: long running TDR worker */
+       struct work_struct lr_tdr;
+       /** @fini_async: do final fini async from this worker */
+       struct work_struct fini_async;
+       /** @resume_time: time of last resume */
+       u64 resume_time;
+       /** @state: GuC specific state for this xe_exec_queue */
+       atomic_t state;
+       /** @wqi_head: work queue item head */
+       u32 wqi_head;
+       /** @wqi_tail: work queue item tail */
+       u32 wqi_tail;
+       /** @id: GuC id for this exec_queue */
+       u16 id;
+       /** @suspend_wait: wait queue used to wait on pending suspends */
+       wait_queue_head_t suspend_wait;
+       /** @suspend_pending: a suspend of the exec_queue is pending */
+       bool suspend_pending;
+};
+
+#endif
index 7515d7fbb723dda7f06b2ba9b4c17519fdd8bd48..4216a6d9e47879a5d6ca890f4d1163f70248d3f6 100644 (file)
@@ -69,13 +69,13 @@ struct guc_klv_generic_dw_t {
 } __packed;
 
 /* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
-struct guc_update_engine_policy_header {
+struct guc_update_exec_queue_policy_header {
        u32 action;
        u32 guc_id;
 } __packed;
 
-struct guc_update_engine_policy {
-       struct guc_update_engine_policy_header header;
+struct guc_update_exec_queue_policy {
+       struct guc_update_exec_queue_policy_header header;
        struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
 } __packed;
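
For illustration only (not something this patch adds): the renamed guc_update_exec_queue_policy packet is sent with a length expressed in dwords, the header plus however many KLVs were filled in. A minimal sketch of that calculation, mirroring __guc_exec_queue_policy_action_size() in xe_guc_submit.c; the stand-in struct layouts below are assumed to match the real __packed ones in this header:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the real __packed layouts in xe_guc_fwif.h (assumed shape). */
    struct demo_policy_header { unsigned int action; unsigned int guc_id; };
    struct demo_klv_generic_dw { unsigned int kl; unsigned int value; };

    /* H2G action size in dwords: the header plus the KLVs actually populated. */
    static unsigned int policy_action_size_dw(unsigned int count)
    {
            size_t bytes = sizeof(struct demo_policy_header) +
                           count * sizeof(struct demo_klv_generic_dw);

            return bytes / sizeof(unsigned int);
    }

    int main(void)
    {
            /* priority + execution quantum + preemption timeout */
            printf("%u dwords\n", policy_action_size_dw(3));
            return 0;
    }
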
 
index 5198e91eeefbe0df3e66a4d1197425486c1c7b94..42454c12efb30519bcea9cea2fbbc5ae970b3b92 100644 (file)
@@ -22,7 +22,7 @@
 #include "xe_gt.h"
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
-#include "xe_guc_engine_types.h"
+#include "xe_guc_exec_queue_types.h"
 #include "xe_guc_submit_types.h"
 #include "xe_hw_engine.h"
 #include "xe_hw_fence.h"
@@ -48,9 +48,9 @@ guc_to_xe(struct xe_guc *guc)
 }
 
 static struct xe_guc *
-engine_to_guc(struct xe_engine *e)
+exec_queue_to_guc(struct xe_exec_queue *q)
 {
-       return &e->gt->uc.guc;
+       return &q->gt->uc.guc;
 }
 
 /*
@@ -58,140 +58,140 @@ engine_to_guc(struct xe_engine *e)
  * at the same time (e.g. a suspend can be happening at the same time as schedule
  * engine done being processed).
  */
-#define ENGINE_STATE_REGISTERED                (1 << 0)
+#define EXEC_QUEUE_STATE_REGISTERED            (1 << 0)
 #define ENGINE_STATE_ENABLED           (1 << 1)
-#define ENGINE_STATE_PENDING_ENABLE    (1 << 2)
-#define ENGINE_STATE_PENDING_DISABLE   (1 << 3)
-#define ENGINE_STATE_DESTROYED         (1 << 4)
+#define EXEC_QUEUE_STATE_PENDING_ENABLE        (1 << 2)
+#define EXEC_QUEUE_STATE_PENDING_DISABLE       (1 << 3)
+#define EXEC_QUEUE_STATE_DESTROYED             (1 << 4)
 #define ENGINE_STATE_SUSPENDED         (1 << 5)
-#define ENGINE_STATE_RESET             (1 << 6)
+#define EXEC_QUEUE_STATE_RESET         (1 << 6)
 #define ENGINE_STATE_KILLED            (1 << 7)
 
-static bool engine_registered(struct xe_engine *e)
+static bool exec_queue_registered(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_REGISTERED;
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
 }
 
-static void set_engine_registered(struct xe_engine *e)
+static void set_exec_queue_registered(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_REGISTERED, &e->guc->state);
+       atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
 }
 
-static void clear_engine_registered(struct xe_engine *e)
+static void clear_exec_queue_registered(struct xe_exec_queue *q)
 {
-       atomic_and(~ENGINE_STATE_REGISTERED, &e->guc->state);
+       atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
 }
 
-static bool engine_enabled(struct xe_engine *e)
+static bool exec_queue_enabled(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_ENABLED;
+       return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
 }
 
-static void set_engine_enabled(struct xe_engine *e)
+static void set_exec_queue_enabled(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_ENABLED, &e->guc->state);
+       atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
 }
 
-static void clear_engine_enabled(struct xe_engine *e)
+static void clear_exec_queue_enabled(struct xe_exec_queue *q)
 {
-       atomic_and(~ENGINE_STATE_ENABLED, &e->guc->state);
+       atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
 }
 
-static bool engine_pending_enable(struct xe_engine *e)
+static bool exec_queue_pending_enable(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_PENDING_ENABLE;
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
 }
 
-static void set_engine_pending_enable(struct xe_engine *e)
+static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_PENDING_ENABLE, &e->guc->state);
+       atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
 }
 
-static void clear_engine_pending_enable(struct xe_engine *e)
+static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
 {
-       atomic_and(~ENGINE_STATE_PENDING_ENABLE, &e->guc->state);
+       atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
 }
 
-static bool engine_pending_disable(struct xe_engine *e)
+static bool exec_queue_pending_disable(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_PENDING_DISABLE;
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
 }
 
-static void set_engine_pending_disable(struct xe_engine *e)
+static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_PENDING_DISABLE, &e->guc->state);
+       atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
 }
 
-static void clear_engine_pending_disable(struct xe_engine *e)
+static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
 {
-       atomic_and(~ENGINE_STATE_PENDING_DISABLE, &e->guc->state);
+       atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
 }
 
-static bool engine_destroyed(struct xe_engine *e)
+static bool exec_queue_destroyed(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_DESTROYED;
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
 }
 
-static void set_engine_destroyed(struct xe_engine *e)
+static void set_exec_queue_destroyed(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_DESTROYED, &e->guc->state);
+       atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
 }
 
-static bool engine_banned(struct xe_engine *e)
+static bool exec_queue_banned(struct xe_exec_queue *q)
 {
-       return (e->flags & ENGINE_FLAG_BANNED);
+       return (q->flags & EXEC_QUEUE_FLAG_BANNED);
 }
 
-static void set_engine_banned(struct xe_engine *e)
+static void set_exec_queue_banned(struct xe_exec_queue *q)
 {
-       e->flags |= ENGINE_FLAG_BANNED;
+       q->flags |= EXEC_QUEUE_FLAG_BANNED;
 }
 
-static bool engine_suspended(struct xe_engine *e)
+static bool exec_queue_suspended(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_SUSPENDED;
+       return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED;
 }
 
-static void set_engine_suspended(struct xe_engine *e)
+static void set_exec_queue_suspended(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_SUSPENDED, &e->guc->state);
+       atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state);
 }
 
-static void clear_engine_suspended(struct xe_engine *e)
+static void clear_exec_queue_suspended(struct xe_exec_queue *q)
 {
-       atomic_and(~ENGINE_STATE_SUSPENDED, &e->guc->state);
+       atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state);
 }
 
-static bool engine_reset(struct xe_engine *e)
+static bool exec_queue_reset(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_RESET;
+       return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
 }
 
-static void set_engine_reset(struct xe_engine *e)
+static void set_exec_queue_reset(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_RESET, &e->guc->state);
+       atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
 }
 
-static bool engine_killed(struct xe_engine *e)
+static bool exec_queue_killed(struct xe_exec_queue *q)
 {
-       return atomic_read(&e->guc->state) & ENGINE_STATE_KILLED;
+       return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED;
 }
 
-static void set_engine_killed(struct xe_engine *e)
+static void set_exec_queue_killed(struct xe_exec_queue *q)
 {
-       atomic_or(ENGINE_STATE_KILLED, &e->guc->state);
+       atomic_or(ENGINE_STATE_KILLED, &q->guc->state);
 }
 
-static bool engine_killed_or_banned(struct xe_engine *e)
+static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
 {
-       return engine_killed(e) || engine_banned(e);
+       return exec_queue_killed(q) || exec_queue_banned(q);
 }
 
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
        struct xe_guc *guc = arg;
 
-       xa_destroy(&guc->submission_state.engine_lookup);
+       xa_destroy(&guc->submission_state.exec_queue_lookup);
        ida_destroy(&guc->submission_state.guc_ids);
        bitmap_free(guc->submission_state.guc_ids_bitmap);
 }
@@ -201,7 +201,7 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
 #define GUC_ID_NUMBER_SLRC     (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
 #define GUC_ID_START_MLRC      GUC_ID_NUMBER_SLRC
 
-static const struct xe_engine_ops guc_engine_ops;
+static const struct xe_exec_queue_ops guc_exec_queue_ops;
 
 static void primelockdep(struct xe_guc *guc)
 {
@@ -228,10 +228,10 @@ int xe_guc_submit_init(struct xe_guc *guc)
        if (!guc->submission_state.guc_ids_bitmap)
                return -ENOMEM;
 
-       gt->engine_ops = &guc_engine_ops;
+       gt->exec_queue_ops = &guc_exec_queue_ops;
 
        mutex_init(&guc->submission_state.lock);
-       xa_init(&guc->submission_state.engine_lookup);
+       xa_init(&guc->submission_state.exec_queue_lookup);
        ida_init(&guc->submission_state.guc_ids);
 
        spin_lock_init(&guc->submission_state.suspend.lock);
@@ -246,7 +246,7 @@ int xe_guc_submit_init(struct xe_guc *guc)
        return 0;
 }
 
-static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
+static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 {
        int ret;
        void *ptr;
@@ -260,11 +260,11 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
         */
        lockdep_assert_held(&guc->submission_state.lock);
 
-       if (xe_engine_is_parallel(e)) {
+       if (xe_exec_queue_is_parallel(q)) {
                void *bitmap = guc->submission_state.guc_ids_bitmap;
 
                ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
-                                             order_base_2(e->width));
+                                             order_base_2(q->width));
        } else {
                ret = ida_simple_get(&guc->submission_state.guc_ids, 0,
                                     GUC_ID_NUMBER_SLRC, GFP_NOWAIT);
@@ -272,12 +272,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
        if (ret < 0)
                return ret;
 
-       e->guc->id = ret;
-       if (xe_engine_is_parallel(e))
-               e->guc->id += GUC_ID_START_MLRC;
+       q->guc->id = ret;
+       if (xe_exec_queue_is_parallel(q))
+               q->guc->id += GUC_ID_START_MLRC;
 
-       ptr = xa_store(&guc->submission_state.engine_lookup,
-                      e->guc->id, e, GFP_NOWAIT);
+       ptr = xa_store(&guc->submission_state.exec_queue_lookup,
+                      q->guc->id, q, GFP_NOWAIT);
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto err_release;
@@ -286,29 +286,29 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
        return 0;
 
 err_release:
-       ida_simple_remove(&guc->submission_state.guc_ids, e->guc->id);
+       ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
        return ret;
 }
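
For illustration only (not something this patch adds): alloc_guc_id() keeps single-LRC and parallel (multi-LRC) exec queues in disjoint GuC id ranges, an IDA for the former and a bitmap region rebased by GUC_ID_START_MLRC for the latter. A small sketch of the id arithmetic; GUC_ID_MAX and GUC_ID_NUMBER_MLRC below are illustrative values, the real ones are defined earlier in this file:

    #include <stdio.h>

    /* Illustrative values only; the driver defines the real ones. */
    #define GUC_ID_MAX              65535
    #define GUC_ID_NUMBER_MLRC      4096
    #define GUC_ID_NUMBER_SLRC      (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
    #define GUC_ID_START_MLRC       GUC_ID_NUMBER_SLRC

    /*
     * Single-LRC queues draw ids from an IDA in [0, GUC_ID_NUMBER_SLRC); a
     * parallel queue reserves a power-of-two block (order_base_2(width)) from
     * the MLRC bitmap and the returned offset is rebased so its id lands at
     * or above GUC_ID_START_MLRC.
     */
    static int mlrc_guc_id(int bitmap_offset)
    {
            return GUC_ID_START_MLRC + bitmap_offset;
    }

    int main(void)
    {
            printf("single-LRC id range: [0, %d)\n", GUC_ID_NUMBER_SLRC);
            printf("first multi-LRC id:  %d\n", mlrc_guc_id(0));
            return 0;
    }
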
 
-static void release_guc_id(struct xe_guc *guc, struct xe_engine *e)
+static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 {
        mutex_lock(&guc->submission_state.lock);
-       xa_erase(&guc->submission_state.engine_lookup, e->guc->id);
-       if (xe_engine_is_parallel(e))
+       xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id);
+       if (xe_exec_queue_is_parallel(q))
                bitmap_release_region(guc->submission_state.guc_ids_bitmap,
-                                     e->guc->id - GUC_ID_START_MLRC,
-                                     order_base_2(e->width));
+                                     q->guc->id - GUC_ID_START_MLRC,
+                                     order_base_2(q->width));
        else
-               ida_simple_remove(&guc->submission_state.guc_ids, e->guc->id);
+               ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
        mutex_unlock(&guc->submission_state.lock);
 }
 
-struct engine_policy {
+struct exec_queue_policy {
        u32 count;
-       struct guc_update_engine_policy h2g;
+       struct guc_update_exec_queue_policy h2g;
 };
 
-static u32 __guc_engine_policy_action_size(struct engine_policy *policy)
+static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
 {
        size_t bytes = sizeof(policy->h2g.header) +
                       (sizeof(policy->h2g.klv[0]) * policy->count);
@@ -316,8 +316,8 @@ static u32 __guc_engine_policy_action_size(struct engine_policy *policy)
        return bytes / sizeof(u32);
 }
 
-static void __guc_engine_policy_start_klv(struct engine_policy *policy,
-                                         u16 guc_id)
+static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
+                                             u16 guc_id)
 {
        policy->h2g.header.action =
                XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
@@ -325,8 +325,8 @@ static void __guc_engine_policy_start_klv(struct engine_policy *policy,
        policy->count = 0;
 }
 
-#define MAKE_ENGINE_POLICY_ADD(func, id) \
-static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
+#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
+static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
                                           u32 data) \
 { \
        XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
@@ -339,45 +339,45 @@ static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
        policy->count++; \
 }
 
-MAKE_ENGINE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
-MAKE_ENGINE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
-MAKE_ENGINE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
-#undef MAKE_ENGINE_POLICY_ADD
+MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
+MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
+MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+#undef MAKE_EXEC_QUEUE_POLICY_ADD
 
-static const int xe_engine_prio_to_guc[] = {
-       [XE_ENGINE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
-       [XE_ENGINE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
-       [XE_ENGINE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
-       [XE_ENGINE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
+static const int xe_exec_queue_prio_to_guc[] = {
+       [XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
+       [XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
+       [XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
+       [XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
 };
 
-static void init_policies(struct xe_guc *guc, struct xe_engine *e)
+static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
 {
-       struct engine_policy policy;
-       enum xe_engine_priority prio = e->priority;
-       u32 timeslice_us = e->sched_props.timeslice_us;
-       u32 preempt_timeout_us = e->sched_props.preempt_timeout_us;
+       struct exec_queue_policy policy;
+       enum xe_exec_queue_priority prio = q->priority;
+       u32 timeslice_us = q->sched_props.timeslice_us;
+       u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
 
-       XE_WARN_ON(!engine_registered(e));
+       XE_WARN_ON(!exec_queue_registered(q));
 
-       __guc_engine_policy_start_klv(&policy, e->guc->id);
-       __guc_engine_policy_add_priority(&policy, xe_engine_prio_to_guc[prio]);
-       __guc_engine_policy_add_execution_quantum(&policy, timeslice_us);
-       __guc_engine_policy_add_preemption_timeout(&policy, preempt_timeout_us);
+       __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+       __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
+       __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
+       __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
 
        xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
-                      __guc_engine_policy_action_size(&policy), 0, 0);
+                      __guc_exec_queue_policy_action_size(&policy), 0, 0);
 }
 
-static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_engine *e)
+static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
 {
-       struct engine_policy policy;
+       struct exec_queue_policy policy;
 
-       __guc_engine_policy_start_klv(&policy, e->guc->id);
-       __guc_engine_policy_add_preemption_timeout(&policy, 1);
+       __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+       __guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
 
        xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
-                      __guc_engine_policy_action_size(&policy), 0, 0);
+                      __guc_exec_queue_policy_action_size(&policy), 0, 0);
 }
 
 #define parallel_read(xe_, map_, field_) \
@@ -388,7 +388,7 @@ static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_engine *e)
                        field_, val_)
 
 static void __register_mlrc_engine(struct xe_guc *guc,
-                                  struct xe_engine *e,
+                                  struct xe_exec_queue *q,
                                   struct guc_ctxt_registration_info *info)
 {
 #define MAX_MLRC_REG_SIZE      (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
@@ -396,7 +396,7 @@ static void __register_mlrc_engine(struct xe_guc *guc,
        int len = 0;
        int i;
 
-       XE_WARN_ON(!xe_engine_is_parallel(e));
+       XE_WARN_ON(!xe_exec_queue_is_parallel(q));
 
        action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
        action[len++] = info->flags;
@@ -408,12 +408,12 @@ static void __register_mlrc_engine(struct xe_guc *guc,
        action[len++] = info->wq_base_lo;
        action[len++] = info->wq_base_hi;
        action[len++] = info->wq_size;
-       action[len++] = e->width;
+       action[len++] = q->width;
        action[len++] = info->hwlrca_lo;
        action[len++] = info->hwlrca_hi;
 
-       for (i = 1; i < e->width; ++i) {
-               struct xe_lrc *lrc = e->lrc + i;
+       for (i = 1; i < q->width; ++i) {
+               struct xe_lrc *lrc = q->lrc + i;
 
                action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
                action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
@@ -446,24 +446,24 @@ static void __register_engine(struct xe_guc *guc,
        xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
 }
 
-static void register_engine(struct xe_engine *e)
+static void register_engine(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_lrc *lrc = e->lrc;
+       struct xe_lrc *lrc = q->lrc;
        struct guc_ctxt_registration_info info;
 
-       XE_WARN_ON(engine_registered(e));
+       XE_WARN_ON(exec_queue_registered(q));
 
        memset(&info, 0, sizeof(info));
-       info.context_idx = e->guc->id;
-       info.engine_class = xe_engine_class_to_guc_class(e->class);
-       info.engine_submit_mask = e->logical_mask;
+       info.context_idx = q->guc->id;
+       info.engine_class = xe_engine_class_to_guc_class(q->class);
+       info.engine_submit_mask = q->logical_mask;
        info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
        info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
        info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
 
-       if (xe_engine_is_parallel(e)) {
+       if (xe_exec_queue_is_parallel(q)) {
                u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
                struct iosys_map map = xe_lrc_parallel_map(lrc);
 
@@ -477,8 +477,8 @@ static void register_engine(struct xe_engine *e)
                        offsetof(struct guc_submit_parallel_scratch, wq[0]));
                info.wq_size = WQ_SIZE;
 
-               e->guc->wqi_head = 0;
-               e->guc->wqi_tail = 0;
+               q->guc->wqi_head = 0;
+               q->guc->wqi_tail = 0;
                xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
                parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
        }
@@ -488,38 +488,38 @@ static void register_engine(struct xe_engine *e)
         * the GuC as jobs signal immediately and can't destroy an engine if the
         * GuC has a reference to it.
         */
-       if (xe_engine_is_lr(e))
-               xe_engine_get(e);
+       if (xe_exec_queue_is_lr(q))
+               xe_exec_queue_get(q);
 
-       set_engine_registered(e);
-       trace_xe_engine_register(e);
-       if (xe_engine_is_parallel(e))
-               __register_mlrc_engine(guc, e, &info);
+       set_exec_queue_registered(q);
+       trace_xe_exec_queue_register(q);
+       if (xe_exec_queue_is_parallel(q))
+               __register_mlrc_engine(guc, q, &info);
        else
                __register_engine(guc, &info);
-       init_policies(guc, e);
+       init_policies(guc, q);
 }
 
-static u32 wq_space_until_wrap(struct xe_engine *e)
+static u32 wq_space_until_wrap(struct xe_exec_queue *q)
 {
-       return (WQ_SIZE - e->guc->wqi_tail);
+       return (WQ_SIZE - q->guc->wqi_tail);
 }
 
-static int wq_wait_for_space(struct xe_engine *e, u32 wqi_size)
+static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct iosys_map map = xe_lrc_parallel_map(e->lrc);
+       struct iosys_map map = xe_lrc_parallel_map(q->lrc);
        unsigned int sleep_period_ms = 1;
 
 #define AVAILABLE_SPACE \
-       CIRC_SPACE(e->guc->wqi_tail, e->guc->wqi_head, WQ_SIZE)
+       CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
        if (wqi_size > AVAILABLE_SPACE) {
 try_again:
-               e->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
+               q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
                if (wqi_size > AVAILABLE_SPACE) {
                        if (sleep_period_ms == 1024) {
-                               xe_gt_reset_async(e->gt);
+                               xe_gt_reset_async(q->gt);
                                return -ENODEV;
                        }
 
@@ -533,52 +533,52 @@ try_again:
        return 0;
 }
 
-static int wq_noop_append(struct xe_engine *e)
+static int wq_noop_append(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct iosys_map map = xe_lrc_parallel_map(e->lrc);
-       u32 len_dw = wq_space_until_wrap(e) / sizeof(u32) - 1;
+       struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+       u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
 
-       if (wq_wait_for_space(e, wq_space_until_wrap(e)))
+       if (wq_wait_for_space(q, wq_space_until_wrap(q)))
                return -ENODEV;
 
        XE_WARN_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
 
-       parallel_write(xe, map, wq[e->guc->wqi_tail / sizeof(u32)],
+       parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
                       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
                       FIELD_PREP(WQ_LEN_MASK, len_dw));
-       e->guc->wqi_tail = 0;
+       q->guc->wqi_tail = 0;
 
        return 0;
 }
 
-static void wq_item_append(struct xe_engine *e)
+static void wq_item_append(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct iosys_map map = xe_lrc_parallel_map(e->lrc);
+       struct iosys_map map = xe_lrc_parallel_map(q->lrc);
 #define WQ_HEADER_SIZE 4       /* Includes 1 LRC address too */
        u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
-       u32 wqi_size = (e->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
+       u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
        u32 len_dw = (wqi_size / sizeof(u32)) - 1;
        int i = 0, j;
 
-       if (wqi_size > wq_space_until_wrap(e)) {
-               if (wq_noop_append(e))
+       if (wqi_size > wq_space_until_wrap(q)) {
+               if (wq_noop_append(q))
                        return;
        }
-       if (wq_wait_for_space(e, wqi_size))
+       if (wq_wait_for_space(q, wqi_size))
                return;
 
        wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
                FIELD_PREP(WQ_LEN_MASK, len_dw);
-       wqi[i++] = xe_lrc_descriptor(e->lrc);
-       wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, e->guc->id) |
-               FIELD_PREP(WQ_RING_TAIL_MASK, e->lrc->ring.tail / sizeof(u64));
+       wqi[i++] = xe_lrc_descriptor(q->lrc);
+       wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
+               FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
        wqi[i++] = 0;
-       for (j = 1; j < e->width; ++j) {
-               struct xe_lrc *lrc = e->lrc + j;
+       for (j = 1; j < q->width; ++j) {
+               struct xe_lrc *lrc = q->lrc + j;
 
                wqi[i++] = lrc->ring.tail / sizeof(u64);
        }
@@ -586,55 +586,55 @@ static void wq_item_append(struct xe_engine *e)
        XE_WARN_ON(i != wqi_size / sizeof(u32));
 
        iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
-                                     wq[e->guc->wqi_tail / sizeof(u32)]));
+                                     wq[q->guc->wqi_tail / sizeof(u32)]));
        xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
-       e->guc->wqi_tail += wqi_size;
-       XE_WARN_ON(e->guc->wqi_tail > WQ_SIZE);
+       q->guc->wqi_tail += wqi_size;
+       XE_WARN_ON(q->guc->wqi_tail > WQ_SIZE);
 
        xe_device_wmb(xe);
 
-       map = xe_lrc_parallel_map(e->lrc);
-       parallel_write(xe, map, wq_desc.tail, e->guc->wqi_tail);
+       map = xe_lrc_parallel_map(q->lrc);
+       parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
 }
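
For illustration only (not something this patch adds): the parallel submission work queue written by wq_item_append() is a power-of-two ring addressed by wqi_head/wqi_tail; free space comes from CIRC_SPACE(), and an item that would straddle the end of the ring is preceded by a WQ_TYPE_NOOP filler via wq_noop_append(). A standalone sketch of that space math, using the linux/circ_buf.h formulas and an illustrative WQ_SIZE:

    #include <stdio.h>

    /* Same formulas as linux/circ_buf.h; WQ_SIZE is an illustrative power of two. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))
    #define WQ_SIZE                      0x1000u

    int main(void)
    {
            unsigned int wqi_head = 0x40;   /* consumer offset read back from the GuC */
            unsigned int wqi_tail = 0xff0;  /* producer offset owned by the driver */

            /* Bytes a new work queue item may occupy without overtaking the head. */
            printf("space:      %u\n", CIRC_SPACE(wqi_tail, wqi_head, WQ_SIZE));
            /* Bytes left before the ring wraps; a larger item gets a NOOP filler. */
            printf("until wrap: %u\n", WQ_SIZE - wqi_tail);
            return 0;
    }
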
 
 #define RESUME_PENDING ~0x0ull
-static void submit_engine(struct xe_engine *e)
+static void submit_exec_queue(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
-       struct xe_lrc *lrc = e->lrc;
+       struct xe_guc *guc = exec_queue_to_guc(q);
+       struct xe_lrc *lrc = q->lrc;
        u32 action[3];
        u32 g2h_len = 0;
        u32 num_g2h = 0;
        int len = 0;
        bool extra_submit = false;
 
-       XE_WARN_ON(!engine_registered(e));
+       XE_WARN_ON(!exec_queue_registered(q));
 
-       if (xe_engine_is_parallel(e))
-               wq_item_append(e);
+       if (xe_exec_queue_is_parallel(q))
+               wq_item_append(q);
        else
                xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
 
-       if (engine_suspended(e) && !xe_engine_is_parallel(e))
+       if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
                return;
 
-       if (!engine_enabled(e) && !engine_suspended(e)) {
+       if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
                action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
-               action[len++] = e->guc->id;
+               action[len++] = q->guc->id;
                action[len++] = GUC_CONTEXT_ENABLE;
                g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
                num_g2h = 1;
-               if (xe_engine_is_parallel(e))
+               if (xe_exec_queue_is_parallel(q))
                        extra_submit = true;
 
-               e->guc->resume_time = RESUME_PENDING;
-               set_engine_pending_enable(e);
-               set_engine_enabled(e);
-               trace_xe_engine_scheduling_enable(e);
+               q->guc->resume_time = RESUME_PENDING;
+               set_exec_queue_pending_enable(q);
+               set_exec_queue_enabled(q);
+               trace_xe_exec_queue_scheduling_enable(q);
        } else {
                action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
-               action[len++] = e->guc->id;
-               trace_xe_engine_submit(e);
+               action[len++] = q->guc->id;
+               trace_xe_exec_queue_submit(q);
        }
 
        xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
@@ -642,31 +642,31 @@ static void submit_engine(struct xe_engine *e)
        if (extra_submit) {
                len = 0;
                action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
-               action[len++] = e->guc->id;
-               trace_xe_engine_submit(e);
+               action[len++] = q->guc->id;
+               trace_xe_exec_queue_submit(q);
 
                xe_guc_ct_send(&guc->ct, action, len, 0, 0);
        }
 }
 
 static struct dma_fence *
-guc_engine_run_job(struct drm_sched_job *drm_job)
+guc_exec_queue_run_job(struct drm_sched_job *drm_job)
 {
        struct xe_sched_job *job = to_xe_sched_job(drm_job);
-       struct xe_engine *e = job->engine;
-       bool lr = xe_engine_is_lr(e);
+       struct xe_exec_queue *q = job->q;
+       bool lr = xe_exec_queue_is_lr(q);
 
-       XE_WARN_ON((engine_destroyed(e) || engine_pending_disable(e)) &&
-                  !engine_banned(e) && !engine_suspended(e));
+       XE_WARN_ON((exec_queue_destroyed(q) || exec_queue_pending_disable(q)) &&
+                  !exec_queue_banned(q) && !exec_queue_suspended(q));
 
        trace_xe_sched_job_run(job);
 
-       if (!engine_killed_or_banned(e) && !xe_sched_job_is_error(job)) {
-               if (!engine_registered(e))
-                       register_engine(e);
+       if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
+               if (!exec_queue_registered(q))
+                       register_engine(q);
                if (!lr)        /* LR jobs are emitted in the exec IOCTL */
-                       e->ring_ops->emit_job(job);
-               submit_engine(e);
+                       q->ring_ops->emit_job(job);
+               submit_exec_queue(q);
        }
 
        if (lr) {
@@ -679,7 +679,7 @@ guc_engine_run_job(struct drm_sched_job *drm_job)
        }
 }
 
-static void guc_engine_free_job(struct drm_sched_job *drm_job)
+static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
 {
        struct xe_sched_job *job = to_xe_sched_job(drm_job);
 
@@ -692,37 +692,37 @@ static int guc_read_stopped(struct xe_guc *guc)
        return atomic_read(&guc->submission_state.stopped);
 }
 
-#define MAKE_SCHED_CONTEXT_ACTION(e, enable_disable)                   \
+#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable)                   \
        u32 action[] = {                                                \
                XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,                   \
-               e->guc->id,                                             \
+               q->guc->id,                                             \
                GUC_CONTEXT_##enable_disable,                           \
        }
 
 static void disable_scheduling_deregister(struct xe_guc *guc,
-                                         struct xe_engine *e)
+                                         struct xe_exec_queue *q)
 {
-       MAKE_SCHED_CONTEXT_ACTION(e, DISABLE);
+       MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
        int ret;
 
-       set_min_preemption_timeout(guc, e);
+       set_min_preemption_timeout(guc, q);
        smp_rmb();
-       ret = wait_event_timeout(guc->ct.wq, !engine_pending_enable(e) ||
+       ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
                                 guc_read_stopped(guc), HZ * 5);
        if (!ret) {
-               struct xe_gpu_scheduler *sched = &e->guc->sched;
+               struct xe_gpu_scheduler *sched = &q->guc->sched;
 
                XE_WARN_ON("Pending enable failed to respond");
                xe_sched_submission_start(sched);
-               xe_gt_reset_async(e->gt);
+               xe_gt_reset_async(q->gt);
                xe_sched_tdr_queue_imm(sched);
                return;
        }
 
-       clear_engine_enabled(e);
-       set_engine_pending_disable(e);
-       set_engine_destroyed(e);
-       trace_xe_engine_scheduling_disable(e);
+       clear_exec_queue_enabled(q);
+       set_exec_queue_pending_disable(q);
+       set_exec_queue_destroyed(q);
+       trace_xe_exec_queue_scheduling_disable(q);
 
        /*
         * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
@@ -733,27 +733,27 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
                       G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
 }
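
For illustration only (not something this patch adds): MAKE_SCHED_CONTEXT_ACTION(q, DISABLE) used above just declares the three-dword H2G payload for a scheduling mode change. A sketch of the expanded array, with stand-in values for the ABI constants (the real ones come from the GuC ABI headers):

    #include <stdio.h>

    /* Stand-in values; the real constants live in the GuC ABI headers. */
    #define XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET    0x1001
    #define GUC_CONTEXT_DISABLE                     0
    #define GUC_CONTEXT_ENABLE                      1

    int main(void)
    {
            unsigned int guc_id = 42;       /* hypothetical q->guc->id */
            /* What MAKE_SCHED_CONTEXT_ACTION(q, DISABLE) declares. */
            unsigned int action[] = {
                    XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
                    guc_id,
                    GUC_CONTEXT_DISABLE,
            };
            unsigned int i;

            for (i = 0; i < sizeof(action) / sizeof(action[0]); i++)
                    printf("dw%u: 0x%x\n", i, action[i]);
            return 0;
    }
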
 
-static void guc_engine_print(struct xe_engine *e, struct drm_printer *p);
+static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p);
 
 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
-static void simple_error_capture(struct xe_engine *e)
+static void simple_error_capture(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct drm_printer p = drm_err_printer("");
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
-       u32 adj_logical_mask = e->logical_mask;
-       u32 width_mask = (0x1 << e->width) - 1;
+       u32 adj_logical_mask = q->logical_mask;
+       u32 width_mask = (0x1 << q->width) - 1;
        int i;
        bool cookie;
 
-       if (e->vm && !e->vm->error_capture.capture_once) {
-               e->vm->error_capture.capture_once = true;
+       if (q->vm && !q->vm->error_capture.capture_once) {
+               q->vm->error_capture.capture_once = true;
                cookie = dma_fence_begin_signalling();
-               for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+               for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
                        if (adj_logical_mask & BIT(i)) {
                                adj_logical_mask |= width_mask << i;
-                               i += e->width;
+                               i += q->width;
                        } else {
                                ++i;
                        }
@@ -761,66 +761,66 @@ static void simple_error_capture(struct xe_engine *e)
 
                xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
                xe_guc_ct_print(&guc->ct, &p, true);
-               guc_engine_print(e, &p);
+               guc_exec_queue_print(q, &p);
                for_each_hw_engine(hwe, guc_to_gt(guc), id) {
-                       if (hwe->class != e->hwe->class ||
+                       if (hwe->class != q->hwe->class ||
                            !(BIT(hwe->logical_instance) & adj_logical_mask))
                                continue;
                        xe_hw_engine_print(hwe, &p);
                }
-               xe_analyze_vm(&p, e->vm, e->gt->info.id);
+               xe_analyze_vm(&p, q->vm, q->gt->info.id);
                xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
                dma_fence_end_signalling(cookie);
        }
 }
 #else
-static void simple_error_capture(struct xe_engine *e)
+static void simple_error_capture(struct xe_exec_queue *q)
 {
 }
 #endif
 
-static void xe_guc_engine_trigger_cleanup(struct xe_engine *e)
+static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       if (xe_engine_is_lr(e))
-               queue_work(guc_to_gt(guc)->ordered_wq, &e->guc->lr_tdr);
+       if (xe_exec_queue_is_lr(q))
+               queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
        else
-               xe_sched_tdr_queue_imm(&e->guc->sched);
+               xe_sched_tdr_queue_imm(&q->guc->sched);
 }
 
-static void xe_guc_engine_lr_cleanup(struct work_struct *w)
+static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
 {
-       struct xe_guc_engine *ge =
-               container_of(w, struct xe_guc_engine, lr_tdr);
-       struct xe_engine *e = ge->engine;
+       struct xe_guc_exec_queue *ge =
+               container_of(w, struct xe_guc_exec_queue, lr_tdr);
+       struct xe_exec_queue *q = ge->q;
        struct xe_gpu_scheduler *sched = &ge->sched;
 
-       XE_WARN_ON(!xe_engine_is_lr(e));
-       trace_xe_engine_lr_cleanup(e);
+       XE_WARN_ON(!xe_exec_queue_is_lr(q));
+       trace_xe_exec_queue_lr_cleanup(q);
 
        /* Kill the run_job / process_msg entry points */
        xe_sched_submission_stop(sched);
 
        /* Engine state now stable, disable scheduling / deregister if needed */
-       if (engine_registered(e)) {
-               struct xe_guc *guc = engine_to_guc(e);
+       if (exec_queue_registered(q)) {
+               struct xe_guc *guc = exec_queue_to_guc(q);
                int ret;
 
-               set_engine_banned(e);
-               disable_scheduling_deregister(guc, e);
+               set_exec_queue_banned(q);
+               disable_scheduling_deregister(guc, q);
 
                /*
                 * Must wait for scheduling to be disabled before signalling
                 * any fences, if GT broken the GT reset code should signal us.
                 */
                ret = wait_event_timeout(guc->ct.wq,
-                                        !engine_pending_disable(e) ||
+                                        !exec_queue_pending_disable(q) ||
                                         guc_read_stopped(guc), HZ * 5);
                if (!ret) {
                        XE_WARN_ON("Schedule disable failed to respond");
                        xe_sched_submission_start(sched);
-                       xe_gt_reset_async(e->gt);
+                       xe_gt_reset_async(q->gt);
                        return;
                }
        }
@@ -829,27 +829,27 @@ static void xe_guc_engine_lr_cleanup(struct work_struct *w)
 }
 
 static enum drm_gpu_sched_stat
-guc_engine_timedout_job(struct drm_sched_job *drm_job)
+guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 {
        struct xe_sched_job *job = to_xe_sched_job(drm_job);
        struct xe_sched_job *tmp_job;
-       struct xe_engine *e = job->engine;
-       struct xe_gpu_scheduler *sched = &e->guc->sched;
-       struct xe_device *xe = guc_to_xe(engine_to_guc(e));
+       struct xe_exec_queue *q = job->q;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
+       struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q));
        int err = -ETIME;
        int i = 0;
 
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
-               XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL);
-               XE_WARN_ON(e->flags & ENGINE_FLAG_VM && !engine_killed(e));
+               XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL);
+               XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q));
 
                drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
-                          xe_sched_job_seqno(job), e->guc->id, e->flags);
-               simple_error_capture(e);
-               xe_devcoredump(e);
+                          xe_sched_job_seqno(job), q->guc->id, q->flags);
+               simple_error_capture(q);
+               xe_devcoredump(q);
        } else {
                drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
-                        xe_sched_job_seqno(job), e->guc->id, e->flags);
+                        xe_sched_job_seqno(job), q->guc->id, q->flags);
        }
        trace_xe_sched_job_timedout(job);
 
@@ -860,26 +860,26 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
         * Kernel jobs should never fail, nor should VM jobs; if they do,
         * something has gone wrong and the GT needs a reset
         */
-       if (e->flags & ENGINE_FLAG_KERNEL ||
-           (e->flags & ENGINE_FLAG_VM && !engine_killed(e))) {
+       if (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
+           (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) {
                if (!xe_sched_invalidate_job(job, 2)) {
                        xe_sched_add_pending_job(sched, job);
                        xe_sched_submission_start(sched);
-                       xe_gt_reset_async(e->gt);
+                       xe_gt_reset_async(q->gt);
                        goto out;
                }
        }
 
        /* Engine state now stable, disable scheduling if needed */
-       if (engine_enabled(e)) {
-               struct xe_guc *guc = engine_to_guc(e);
+       if (exec_queue_enabled(q)) {
+               struct xe_guc *guc = exec_queue_to_guc(q);
                int ret;
 
-               if (engine_reset(e))
+               if (exec_queue_reset(q))
                        err = -EIO;
-               set_engine_banned(e);
-               xe_engine_get(e);
-               disable_scheduling_deregister(guc, e);
+               set_exec_queue_banned(q);
+               xe_exec_queue_get(q);
+               disable_scheduling_deregister(guc, q);
 
                /*
                 * Must wait for scheduling to be disabled before signalling
@@ -891,20 +891,20 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
                 */
                smp_rmb();
                ret = wait_event_timeout(guc->ct.wq,
-                                        !engine_pending_disable(e) ||
+                                        !exec_queue_pending_disable(q) ||
                                         guc_read_stopped(guc), HZ * 5);
                if (!ret) {
                        XE_WARN_ON("Schedule disable failed to respond");
                        xe_sched_add_pending_job(sched, job);
                        xe_sched_submission_start(sched);
-                       xe_gt_reset_async(e->gt);
+                       xe_gt_reset_async(q->gt);
                        xe_sched_tdr_queue_imm(sched);
                        goto out;
                }
        }
 
        /* Stop fence signaling */
-       xe_hw_fence_irq_stop(e->fence_irq);
+       xe_hw_fence_irq_stop(q->fence_irq);
 
        /*
         * Fence state now stable, stop / start scheduler which cleans up any
@@ -912,7 +912,7 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
         */
        xe_sched_add_pending_job(sched, job);
        xe_sched_submission_start(sched);
-       xe_guc_engine_trigger_cleanup(e);
+       xe_guc_exec_queue_trigger_cleanup(q);
 
        /* Mark all outstanding jobs as bad, thus completing them */
        spin_lock(&sched->base.job_list_lock);
@@ -921,53 +921,53 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
        spin_unlock(&sched->base.job_list_lock);
 
        /* Start fence signaling */
-       xe_hw_fence_irq_start(e->fence_irq);
+       xe_hw_fence_irq_start(q->fence_irq);
 
 out:
        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-static void __guc_engine_fini_async(struct work_struct *w)
+static void __guc_exec_queue_fini_async(struct work_struct *w)
 {
-       struct xe_guc_engine *ge =
-               container_of(w, struct xe_guc_engine, fini_async);
-       struct xe_engine *e = ge->engine;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc_exec_queue *ge =
+               container_of(w, struct xe_guc_exec_queue, fini_async);
+       struct xe_exec_queue *q = ge->q;
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       trace_xe_engine_destroy(e);
+       trace_xe_exec_queue_destroy(q);
 
-       if (xe_engine_is_lr(e))
+       if (xe_exec_queue_is_lr(q))
                cancel_work_sync(&ge->lr_tdr);
-       if (e->flags & ENGINE_FLAG_PERSISTENT)
-               xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
-       release_guc_id(guc, e);
+       if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
+               xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
+       release_guc_id(guc, q);
        xe_sched_entity_fini(&ge->entity);
        xe_sched_fini(&ge->sched);
 
-       if (!(e->flags & ENGINE_FLAG_KERNEL)) {
+       if (!(q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
                kfree(ge);
-               xe_engine_fini(e);
+               xe_exec_queue_fini(q);
        }
 }
 
-static void guc_engine_fini_async(struct xe_engine *e)
+static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
 {
-       bool kernel = e->flags & ENGINE_FLAG_KERNEL;
+       bool kernel = q->flags & EXEC_QUEUE_FLAG_KERNEL;
 
-       INIT_WORK(&e->guc->fini_async, __guc_engine_fini_async);
-       queue_work(system_wq, &e->guc->fini_async);
+       INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+       queue_work(system_wq, &q->guc->fini_async);
 
        /* We must block on kernel engines so slabs are empty on driver unload */
        if (kernel) {
-               struct xe_guc_engine *ge = e->guc;
+               struct xe_guc_exec_queue *ge = q->guc;
 
                flush_work(&ge->fini_async);
                kfree(ge);
-               xe_engine_fini(e);
+               xe_exec_queue_fini(q);
        }
 }
 
-static void __guc_engine_fini(struct xe_guc *guc, struct xe_engine *e)
+static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
 {
        /*
         * Might be done from within the GPU scheduler, need to do async as we
@@ -976,104 +976,104 @@ static void __guc_engine_fini(struct xe_guc *guc, struct xe_engine *e)
         * this we and don't really care when everything is fini'd, just that it
         * is.
         */
-       guc_engine_fini_async(e);
+       guc_exec_queue_fini_async(q);
 }
 
-static void __guc_engine_process_msg_cleanup(struct xe_sched_msg *msg)
+static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
 {
-       struct xe_engine *e = msg->private_data;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_exec_queue *q = msg->private_data;
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL);
-       trace_xe_engine_cleanup_entity(e);
+       XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL);
+       trace_xe_exec_queue_cleanup_entity(q);
 
-       if (engine_registered(e))
-               disable_scheduling_deregister(guc, e);
+       if (exec_queue_registered(q))
+               disable_scheduling_deregister(guc, q);
        else
-               __guc_engine_fini(guc, e);
+               __guc_exec_queue_fini(guc, q);
 }
 
-static bool guc_engine_allowed_to_change_state(struct xe_engine *e)
+static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
 {
-       return !engine_killed_or_banned(e) && engine_registered(e);
+       return !exec_queue_killed_or_banned(q) && exec_queue_registered(q);
 }
 
-static void __guc_engine_process_msg_set_sched_props(struct xe_sched_msg *msg)
+static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
 {
-       struct xe_engine *e = msg->private_data;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_exec_queue *q = msg->private_data;
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       if (guc_engine_allowed_to_change_state(e))
-               init_policies(guc, e);
+       if (guc_exec_queue_allowed_to_change_state(q))
+               init_policies(guc, q);
        kfree(msg);
 }
 
-static void suspend_fence_signal(struct xe_engine *e)
+static void suspend_fence_signal(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       XE_WARN_ON(!engine_suspended(e) && !engine_killed(e) &&
+       XE_WARN_ON(!exec_queue_suspended(q) && !exec_queue_killed(q) &&
                   !guc_read_stopped(guc));
-       XE_WARN_ON(!e->guc->suspend_pending);
+       XE_WARN_ON(!q->guc->suspend_pending);
 
-       e->guc->suspend_pending = false;
+       q->guc->suspend_pending = false;
        smp_wmb();
-       wake_up(&e->guc->suspend_wait);
+       wake_up(&q->guc->suspend_wait);
 }
 
-static void __guc_engine_process_msg_suspend(struct xe_sched_msg *msg)
+static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
 {
-       struct xe_engine *e = msg->private_data;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_exec_queue *q = msg->private_data;
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       if (guc_engine_allowed_to_change_state(e) && !engine_suspended(e) &&
-           engine_enabled(e)) {
-               wait_event(guc->ct.wq, e->guc->resume_time != RESUME_PENDING ||
+       if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
+           exec_queue_enabled(q)) {
+               wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
                           guc_read_stopped(guc));
 
                if (!guc_read_stopped(guc)) {
-                       MAKE_SCHED_CONTEXT_ACTION(e, DISABLE);
+                       MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
                        s64 since_resume_ms =
                                ktime_ms_delta(ktime_get(),
-                                              e->guc->resume_time);
-                       s64 wait_ms = e->vm->preempt.min_run_period_ms -
+                                              q->guc->resume_time);
+                       s64 wait_ms = q->vm->preempt.min_run_period_ms -
                                since_resume_ms;
 
-                       if (wait_ms > 0 && e->guc->resume_time)
+                       if (wait_ms > 0 && q->guc->resume_time)
                                msleep(wait_ms);
 
-                       set_engine_suspended(e);
-                       clear_engine_enabled(e);
-                       set_engine_pending_disable(e);
-                       trace_xe_engine_scheduling_disable(e);
+                       set_exec_queue_suspended(q);
+                       clear_exec_queue_enabled(q);
+                       set_exec_queue_pending_disable(q);
+                       trace_xe_exec_queue_scheduling_disable(q);
 
                        xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
                                       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
                }
-       } else if (e->guc->suspend_pending) {
-               set_engine_suspended(e);
-               suspend_fence_signal(e);
+       } else if (q->guc->suspend_pending) {
+               set_exec_queue_suspended(q);
+               suspend_fence_signal(q);
        }
 }
 
-static void __guc_engine_process_msg_resume(struct xe_sched_msg *msg)
+static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
 {
-       struct xe_engine *e = msg->private_data;
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_exec_queue *q = msg->private_data;
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       if (guc_engine_allowed_to_change_state(e)) {
-               MAKE_SCHED_CONTEXT_ACTION(e, ENABLE);
+       if (guc_exec_queue_allowed_to_change_state(q)) {
+               MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
 
-               e->guc->resume_time = RESUME_PENDING;
-               clear_engine_suspended(e);
-               set_engine_pending_enable(e);
-               set_engine_enabled(e);
-               trace_xe_engine_scheduling_enable(e);
+               q->guc->resume_time = RESUME_PENDING;
+               clear_exec_queue_suspended(q);
+               set_exec_queue_pending_enable(q);
+               set_exec_queue_enabled(q);
+               trace_xe_exec_queue_scheduling_enable(q);
 
                xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
                               G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
        } else {
-               clear_engine_suspended(e);
+               clear_exec_queue_suspended(q);
        }
 }
 
@@ -1082,22 +1082,22 @@ static void __guc_engine_process_msg_resume(struct xe_sched_msg *msg)
 #define SUSPEND                3
 #define RESUME         4
 
-static void guc_engine_process_msg(struct xe_sched_msg *msg)
+static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
 {
        trace_xe_sched_msg_recv(msg);
 
        switch (msg->opcode) {
        case CLEANUP:
-               __guc_engine_process_msg_cleanup(msg);
+               __guc_exec_queue_process_msg_cleanup(msg);
                break;
        case SET_SCHED_PROPS:
-               __guc_engine_process_msg_set_sched_props(msg);
+               __guc_exec_queue_process_msg_set_sched_props(msg);
                break;
        case SUSPEND:
-               __guc_engine_process_msg_suspend(msg);
+               __guc_exec_queue_process_msg_suspend(msg);
                break;
        case RESUME:
-               __guc_engine_process_msg_resume(msg);
+               __guc_exec_queue_process_msg_resume(msg);
                break;
        default:
                XE_WARN_ON("Unknown message type");
@@ -1105,20 +1105,20 @@ static void guc_engine_process_msg(struct xe_sched_msg *msg)
 }
 
 static const struct drm_sched_backend_ops drm_sched_ops = {
-       .run_job = guc_engine_run_job,
-       .free_job = guc_engine_free_job,
-       .timedout_job = guc_engine_timedout_job,
+       .run_job = guc_exec_queue_run_job,
+       .free_job = guc_exec_queue_free_job,
+       .timedout_job = guc_exec_queue_timedout_job,
 };
 
 static const struct xe_sched_backend_ops xe_sched_ops = {
-       .process_msg = guc_engine_process_msg,
+       .process_msg = guc_exec_queue_process_msg,
 };
 
-static int guc_engine_init(struct xe_engine *e)
+static int guc_exec_queue_init(struct xe_exec_queue *q)
 {
        struct xe_gpu_scheduler *sched;
-       struct xe_guc *guc = engine_to_guc(e);
-       struct xe_guc_engine *ge;
+       struct xe_guc *guc = exec_queue_to_guc(q);
+       struct xe_guc_exec_queue *ge;
        long timeout;
        int err;
 
@@ -1128,15 +1128,15 @@ static int guc_engine_init(struct xe_engine *e)
        if (!ge)
                return -ENOMEM;
 
-       e->guc = ge;
-       ge->engine = e;
+       q->guc = ge;
+       ge->q = q;
        init_waitqueue_head(&ge->suspend_wait);
 
-       timeout = xe_vm_no_dma_fences(e->vm) ? MAX_SCHEDULE_TIMEOUT : HZ * 5;
+       timeout = xe_vm_no_dma_fences(q->vm) ? MAX_SCHEDULE_TIMEOUT : HZ * 5;
        err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, NULL,
-                            e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
+                            q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
                             64, timeout, guc_to_gt(guc)->ordered_wq, NULL,
-                            e->name, gt_to_xe(e->gt)->drm.dev);
+                            q->name, gt_to_xe(q->gt)->drm.dev);
        if (err)
                goto err_free;
 
@@ -1144,45 +1144,45 @@ static int guc_engine_init(struct xe_engine *e)
        err = xe_sched_entity_init(&ge->entity, sched);
        if (err)
                goto err_sched;
-       e->priority = XE_ENGINE_PRIORITY_NORMAL;
+       q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
-       if (xe_engine_is_lr(e))
-               INIT_WORK(&e->guc->lr_tdr, xe_guc_engine_lr_cleanup);
+       if (xe_exec_queue_is_lr(q))
+               INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
 
        mutex_lock(&guc->submission_state.lock);
 
-       err = alloc_guc_id(guc, e);
+       err = alloc_guc_id(guc, q);
        if (err)
                goto err_entity;
 
-       e->entity = &ge->entity;
+       q->entity = &ge->entity;
 
        if (guc_read_stopped(guc))
                xe_sched_stop(sched);
 
        mutex_unlock(&guc->submission_state.lock);
 
-       switch (e->class) {
+       switch (q->class) {
        case XE_ENGINE_CLASS_RENDER:
-               sprintf(e->name, "rcs%d", e->guc->id);
+               sprintf(q->name, "rcs%d", q->guc->id);
                break;
        case XE_ENGINE_CLASS_VIDEO_DECODE:
-               sprintf(e->name, "vcs%d", e->guc->id);
+               sprintf(q->name, "vcs%d", q->guc->id);
                break;
        case XE_ENGINE_CLASS_VIDEO_ENHANCE:
-               sprintf(e->name, "vecs%d", e->guc->id);
+               sprintf(q->name, "vecs%d", q->guc->id);
                break;
        case XE_ENGINE_CLASS_COPY:
-               sprintf(e->name, "bcs%d", e->guc->id);
+               sprintf(q->name, "bcs%d", q->guc->id);
                break;
        case XE_ENGINE_CLASS_COMPUTE:
-               sprintf(e->name, "ccs%d", e->guc->id);
+               sprintf(q->name, "ccs%d", q->guc->id);
                break;
        default:
-               XE_WARN_ON(e->class);
+               XE_WARN_ON(q->class);
        }
 
-       trace_xe_engine_create(e);
+       trace_xe_exec_queue_create(q);
 
        return 0;
 
@@ -1196,133 +1196,133 @@ err_free:
        return err;
 }
 
-static void guc_engine_kill(struct xe_engine *e)
+static void guc_exec_queue_kill(struct xe_exec_queue *q)
 {
-       trace_xe_engine_kill(e);
-       set_engine_killed(e);
-       xe_guc_engine_trigger_cleanup(e);
+       trace_xe_exec_queue_kill(q);
+       set_exec_queue_killed(q);
+       xe_guc_exec_queue_trigger_cleanup(q);
 }
 
-static void guc_engine_add_msg(struct xe_engine *e, struct xe_sched_msg *msg,
-                              u32 opcode)
+static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
+                                  u32 opcode)
 {
        INIT_LIST_HEAD(&msg->link);
        msg->opcode = opcode;
-       msg->private_data = e;
+       msg->private_data = q;
 
        trace_xe_sched_msg_add(msg);
-       xe_sched_add_msg(&e->guc->sched, msg);
+       xe_sched_add_msg(&q->guc->sched, msg);
 }
 
 #define STATIC_MSG_CLEANUP     0
 #define STATIC_MSG_SUSPEND     1
 #define STATIC_MSG_RESUME      2
-static void guc_engine_fini(struct xe_engine *e)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
 {
-       struct xe_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_CLEANUP;
+       struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
 
-       if (!(e->flags & ENGINE_FLAG_KERNEL))
-               guc_engine_add_msg(e, msg, CLEANUP);
+       if (!(q->flags & EXEC_QUEUE_FLAG_KERNEL))
+               guc_exec_queue_add_msg(q, msg, CLEANUP);
        else
-               __guc_engine_fini(engine_to_guc(e), e);
+               __guc_exec_queue_fini(exec_queue_to_guc(q), q);
 }
 
-static int guc_engine_set_priority(struct xe_engine *e,
-                                  enum xe_engine_priority priority)
+static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
+                                      enum xe_exec_queue_priority priority)
 {
        struct xe_sched_msg *msg;
 
-       if (e->priority == priority || engine_killed_or_banned(e))
+       if (q->priority == priority || exec_queue_killed_or_banned(q))
                return 0;
 
        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
-       e->priority = priority;
+       guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
+       q->priority = priority;
 
        return 0;
 }
 
-static int guc_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
+static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
 {
        struct xe_sched_msg *msg;
 
-       if (e->sched_props.timeslice_us == timeslice_us ||
-           engine_killed_or_banned(e))
+       if (q->sched_props.timeslice_us == timeslice_us ||
+           exec_queue_killed_or_banned(q))
                return 0;
 
        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       e->sched_props.timeslice_us = timeslice_us;
-       guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
+       q->sched_props.timeslice_us = timeslice_us;
+       guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
 
        return 0;
 }
 
-static int guc_engine_set_preempt_timeout(struct xe_engine *e,
-                                         u32 preempt_timeout_us)
+static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
+                                             u32 preempt_timeout_us)
 {
        struct xe_sched_msg *msg;
 
-       if (e->sched_props.preempt_timeout_us == preempt_timeout_us ||
-           engine_killed_or_banned(e))
+       if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
+           exec_queue_killed_or_banned(q))
                return 0;
 
        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       e->sched_props.preempt_timeout_us = preempt_timeout_us;
-       guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
+       q->sched_props.preempt_timeout_us = preempt_timeout_us;
+       guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
 
        return 0;
 }
 
-static int guc_engine_set_job_timeout(struct xe_engine *e, u32 job_timeout_ms)
+static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
 {
-       struct xe_gpu_scheduler *sched = &e->guc->sched;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
 
-       XE_WARN_ON(engine_registered(e));
-       XE_WARN_ON(engine_banned(e));
-       XE_WARN_ON(engine_killed(e));
+       XE_WARN_ON(exec_queue_registered(q));
+       XE_WARN_ON(exec_queue_banned(q));
+       XE_WARN_ON(exec_queue_killed(q));
 
        sched->base.timeout = job_timeout_ms;
 
        return 0;
 }
 
-static int guc_engine_suspend(struct xe_engine *e)
+static int guc_exec_queue_suspend(struct xe_exec_queue *q)
 {
-       struct xe_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_SUSPEND;
+       struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
 
-       if (engine_killed_or_banned(e) || e->guc->suspend_pending)
+       if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending)
                return -EINVAL;
 
-       e->guc->suspend_pending = true;
-       guc_engine_add_msg(e, msg, SUSPEND);
+       q->guc->suspend_pending = true;
+       guc_exec_queue_add_msg(q, msg, SUSPEND);
 
        return 0;
 }
 
-static void guc_engine_suspend_wait(struct xe_engine *e)
+static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
 
-       wait_event(e->guc->suspend_wait, !e->guc->suspend_pending ||
+       wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
                   guc_read_stopped(guc));
 }
 
-static void guc_engine_resume(struct xe_engine *e)
+static void guc_exec_queue_resume(struct xe_exec_queue *q)
 {
-       struct xe_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_RESUME;
+       struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
 
-       XE_WARN_ON(e->guc->suspend_pending);
+       XE_WARN_ON(q->guc->suspend_pending);
 
-       guc_engine_add_msg(e, msg, RESUME);
+       guc_exec_queue_add_msg(q, msg, RESUME);
 }
 
 /*
@@ -1331,49 +1331,49 @@ static void guc_engine_resume(struct xe_engine *e)
  * really shouldn't do much other than trap into the DRM scheduler which
  * synchronizes these operations.
  */
-static const struct xe_engine_ops guc_engine_ops = {
-       .init = guc_engine_init,
-       .kill = guc_engine_kill,
-       .fini = guc_engine_fini,
-       .set_priority = guc_engine_set_priority,
-       .set_timeslice = guc_engine_set_timeslice,
-       .set_preempt_timeout = guc_engine_set_preempt_timeout,
-       .set_job_timeout = guc_engine_set_job_timeout,
-       .suspend = guc_engine_suspend,
-       .suspend_wait = guc_engine_suspend_wait,
-       .resume = guc_engine_resume,
+static const struct xe_exec_queue_ops guc_exec_queue_ops = {
+       .init = guc_exec_queue_init,
+       .kill = guc_exec_queue_kill,
+       .fini = guc_exec_queue_fini,
+       .set_priority = guc_exec_queue_set_priority,
+       .set_timeslice = guc_exec_queue_set_timeslice,
+       .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
+       .set_job_timeout = guc_exec_queue_set_job_timeout,
+       .suspend = guc_exec_queue_suspend,
+       .suspend_wait = guc_exec_queue_suspend_wait,
+       .resume = guc_exec_queue_resume,
 };
 
-static void guc_engine_stop(struct xe_guc *guc, struct xe_engine *e)
+static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 {
-       struct xe_gpu_scheduler *sched = &e->guc->sched;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
 
        /* Stop scheduling + flush any DRM scheduler operations */
        xe_sched_submission_stop(sched);
 
        /* Clean up lost G2H + reset engine state */
-       if (engine_registered(e)) {
-               if ((engine_banned(e) && engine_destroyed(e)) ||
-                   xe_engine_is_lr(e))
-                       xe_engine_put(e);
-               else if (engine_destroyed(e))
-                       __guc_engine_fini(guc, e);
+       if (exec_queue_registered(q)) {
+               if ((exec_queue_banned(q) && exec_queue_destroyed(q)) ||
+                   xe_exec_queue_is_lr(q))
+                       xe_exec_queue_put(q);
+               else if (exec_queue_destroyed(q))
+                       __guc_exec_queue_fini(guc, q);
        }
-       if (e->guc->suspend_pending) {
-               set_engine_suspended(e);
-               suspend_fence_signal(e);
+       if (q->guc->suspend_pending) {
+               set_exec_queue_suspended(q);
+               suspend_fence_signal(q);
        }
-       atomic_and(ENGINE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
-                  &e->guc->state);
-       e->guc->resume_time = 0;
-       trace_xe_engine_stop(e);
+       atomic_and(EXEC_QUEUE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
+                  &q->guc->state);
+       q->guc->resume_time = 0;
+       trace_xe_exec_queue_stop(q);
 
        /*
         * Ban any engine (aside from kernel and engines used for VM ops) with a
         * started but not complete job or if a job has gone through a GT reset
         * more than twice.
         */
-       if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM))) {
+       if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
                struct xe_sched_job *job = xe_sched_first_pending_job(sched);
 
                if (job) {
@@ -1381,8 +1381,8 @@ static void guc_engine_stop(struct xe_guc *guc, struct xe_engine *e)
                            !xe_sched_job_completed(job)) ||
                            xe_sched_invalidate_job(job, 2)) {
                                trace_xe_sched_job_ban(job);
-                               xe_sched_tdr_queue_imm(&e->guc->sched);
-                               set_engine_banned(e);
+                               xe_sched_tdr_queue_imm(&q->guc->sched);
+                               set_exec_queue_banned(q);
                        }
                }
        }
@@ -1413,15 +1413,15 @@ void xe_guc_submit_reset_wait(struct xe_guc *guc)
 
 int xe_guc_submit_stop(struct xe_guc *guc)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        unsigned long index;
 
        XE_WARN_ON(guc_read_stopped(guc) != 1);
 
        mutex_lock(&guc->submission_state.lock);
 
-       xa_for_each(&guc->submission_state.engine_lookup, index, e)
-               guc_engine_stop(guc, e);
+       xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+               guc_exec_queue_stop(guc, q);
 
        mutex_unlock(&guc->submission_state.lock);
 
@@ -1433,16 +1433,16 @@ int xe_guc_submit_stop(struct xe_guc *guc)
        return 0;
 }
 
-static void guc_engine_start(struct xe_engine *e)
+static void guc_exec_queue_start(struct xe_exec_queue *q)
 {
-       struct xe_gpu_scheduler *sched = &e->guc->sched;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
 
-       if (!engine_killed_or_banned(e)) {
+       if (!exec_queue_killed_or_banned(q)) {
                int i;
 
-               trace_xe_engine_resubmit(e);
-               for (i = 0; i < e->width; ++i)
-                       xe_lrc_set_ring_head(e->lrc + i, e->lrc[i].ring.tail);
+               trace_xe_exec_queue_resubmit(q);
+               for (i = 0; i < q->width; ++i)
+                       xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
                xe_sched_resubmit_jobs(sched);
        }
 
@@ -1451,15 +1451,15 @@ static void guc_engine_start(struct xe_engine *e)
 
 int xe_guc_submit_start(struct xe_guc *guc)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        unsigned long index;
 
        XE_WARN_ON(guc_read_stopped(guc) != 1);
 
        mutex_lock(&guc->submission_state.lock);
        atomic_dec(&guc->submission_state.stopped);
-       xa_for_each(&guc->submission_state.engine_lookup, index, e)
-               guc_engine_start(e);
+       xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+               guc_exec_queue_start(q);
        mutex_unlock(&guc->submission_state.lock);
 
        wake_up_all(&guc->ct.wq);
@@ -1467,36 +1467,36 @@ int xe_guc_submit_start(struct xe_guc *guc)
        return 0;
 }
 
-static struct xe_engine *
-g2h_engine_lookup(struct xe_guc *guc, u32 guc_id)
+static struct xe_exec_queue *
+g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
 {
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        if (unlikely(guc_id >= GUC_ID_MAX)) {
                drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
                return NULL;
        }
 
-       e = xa_load(&guc->submission_state.engine_lookup, guc_id);
-       if (unlikely(!e)) {
+       q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
+       if (unlikely(!q)) {
                drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
                return NULL;
        }
 
-       XE_WARN_ON(e->guc->id != guc_id);
+       XE_WARN_ON(q->guc->id != guc_id);
 
-       return e;
+       return q;
 }
 
-static void deregister_engine(struct xe_guc *guc, struct xe_engine *e)
+static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
 {
        u32 action[] = {
                XE_GUC_ACTION_DEREGISTER_CONTEXT,
-               e->guc->id,
+               q->guc->id,
        };
 
-       trace_xe_engine_deregister(e);
+       trace_xe_exec_queue_deregister(q);
 
        xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
 }
@@ -1504,7 +1504,7 @@ static void deregister_engine(struct xe_guc *guc, struct xe_engine *e)
 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        u32 guc_id = msg[0];
 
        if (unlikely(len < 2)) {
@@ -1512,34 +1512,34 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
                return -EPROTO;
        }
 
-       e = g2h_engine_lookup(guc, guc_id);
-       if (unlikely(!e))
+       q = g2h_exec_queue_lookup(guc, guc_id);
+       if (unlikely(!q))
                return -EPROTO;
 
-       if (unlikely(!engine_pending_enable(e) &&
-                    !engine_pending_disable(e))) {
+       if (unlikely(!exec_queue_pending_enable(q) &&
+                    !exec_queue_pending_disable(q))) {
                drm_err(&xe->drm, "Unexpected engine state 0x%04x",
-                       atomic_read(&e->guc->state));
+                       atomic_read(&q->guc->state));
                return -EPROTO;
        }
 
-       trace_xe_engine_scheduling_done(e);
+       trace_xe_exec_queue_scheduling_done(q);
 
-       if (engine_pending_enable(e)) {
-               e->guc->resume_time = ktime_get();
-               clear_engine_pending_enable(e);
+       if (exec_queue_pending_enable(q)) {
+               q->guc->resume_time = ktime_get();
+               clear_exec_queue_pending_enable(q);
                smp_wmb();
                wake_up_all(&guc->ct.wq);
        } else {
-               clear_engine_pending_disable(e);
-               if (e->guc->suspend_pending) {
-                       suspend_fence_signal(e);
+               clear_exec_queue_pending_disable(q);
+               if (q->guc->suspend_pending) {
+                       suspend_fence_signal(q);
                } else {
-                       if (engine_banned(e)) {
+                       if (exec_queue_banned(q)) {
                                smp_wmb();
                                wake_up_all(&guc->ct.wq);
                        }
-                       deregister_engine(guc, e);
+                       deregister_exec_queue(guc, q);
                }
        }
 
@@ -1549,7 +1549,7 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        u32 guc_id = msg[0];
 
        if (unlikely(len < 1)) {
@@ -1557,33 +1557,33 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
                return -EPROTO;
        }
 
-       e = g2h_engine_lookup(guc, guc_id);
-       if (unlikely(!e))
+       q = g2h_exec_queue_lookup(guc, guc_id);
+       if (unlikely(!q))
                return -EPROTO;
 
-       if (!engine_destroyed(e) || engine_pending_disable(e) ||
-           engine_pending_enable(e) || engine_enabled(e)) {
+       if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
+           exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
                drm_err(&xe->drm, "Unexpected engine state 0x%04x",
-                       atomic_read(&e->guc->state));
+                       atomic_read(&q->guc->state));
                return -EPROTO;
        }
 
-       trace_xe_engine_deregister_done(e);
+       trace_xe_exec_queue_deregister_done(q);
 
-       clear_engine_registered(e);
+       clear_exec_queue_registered(q);
 
-       if (engine_banned(e) || xe_engine_is_lr(e))
-               xe_engine_put(e);
+       if (exec_queue_banned(q) || xe_exec_queue_is_lr(q))
+               xe_exec_queue_put(q);
        else
-               __guc_engine_fini(guc, e);
+               __guc_exec_queue_fini(guc, q);
 
        return 0;
 }
 
-int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
+int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        u32 guc_id = msg[0];
 
        if (unlikely(len < 1)) {
@@ -1591,34 +1591,34 @@ int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
                return -EPROTO;
        }
 
-       e = g2h_engine_lookup(guc, guc_id);
-       if (unlikely(!e))
+       q = g2h_exec_queue_lookup(guc, guc_id);
+       if (unlikely(!q))
                return -EPROTO;
 
        drm_info(&xe->drm, "Engine reset: guc_id=%d", guc_id);
 
        /* FIXME: Do error capture, most likely async */
 
-       trace_xe_engine_reset(e);
+       trace_xe_exec_queue_reset(q);
 
        /*
         * A banned engine is a NOP at this point (came from
-        * guc_engine_timedout_job). Otherwise, kick drm scheduler to cancel
+        * guc_exec_queue_timedout_job). Otherwise, kick drm scheduler to cancel
         * jobs by setting timeout of the job to the minimum value kicking
-        * guc_engine_timedout_job.
+        * guc_exec_queue_timedout_job.
         */
-       set_engine_reset(e);
-       if (!engine_banned(e))
-               xe_guc_engine_trigger_cleanup(e);
+       set_exec_queue_reset(q);
+       if (!exec_queue_banned(q))
+               xe_guc_exec_queue_trigger_cleanup(q);
 
        return 0;
 }
 
-int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
-                                          u32 len)
+int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
+                                              u32 len)
 {
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        u32 guc_id = msg[0];
 
        if (unlikely(len < 1)) {
@@ -1626,22 +1626,22 @@ int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
                return -EPROTO;
        }
 
-       e = g2h_engine_lookup(guc, guc_id);
-       if (unlikely(!e))
+       q = g2h_exec_queue_lookup(guc, guc_id);
+       if (unlikely(!q))
                return -EPROTO;
 
        drm_warn(&xe->drm, "Engine memory cat error: guc_id=%d", guc_id);
-       trace_xe_engine_memory_cat_error(e);
+       trace_xe_exec_queue_memory_cat_error(q);
 
        /* Treat the same as engine reset */
-       set_engine_reset(e);
-       if (!engine_banned(e))
-               xe_guc_engine_trigger_cleanup(e);
+       set_exec_queue_reset(q);
+       if (!exec_queue_banned(q))
+               xe_guc_exec_queue_trigger_cleanup(q);
 
        return 0;
 }
 
-int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
+int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
        struct xe_device *xe = guc_to_xe(guc);
        u8 guc_class, instance;
@@ -1666,16 +1666,16 @@ int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
 }
 
 static void
-guc_engine_wq_snapshot_capture(struct xe_engine *e,
-                              struct xe_guc_submit_engine_snapshot *snapshot)
+guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
+                                  struct xe_guc_submit_exec_queue_snapshot *snapshot)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct iosys_map map = xe_lrc_parallel_map(e->lrc);
+       struct iosys_map map = xe_lrc_parallel_map(q->lrc);
        int i;
 
-       snapshot->guc.wqi_head = e->guc->wqi_head;
-       snapshot->guc.wqi_tail = e->guc->wqi_tail;
+       snapshot->guc.wqi_head = q->guc->wqi_head;
+       snapshot->guc.wqi_tail = q->guc->wqi_tail;
        snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
        snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
        snapshot->parallel.wq_desc.status = parallel_read(xe, map,
@@ -1692,8 +1692,8 @@ guc_engine_wq_snapshot_capture(struct xe_engine *e,
 }
 
 static void
-guc_engine_wq_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
-                            struct drm_printer *p)
+guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+                                struct drm_printer *p)
 {
        int i;
 
@@ -1714,23 +1714,23 @@ guc_engine_wq_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
 }
 
 /**
- * xe_guc_engine_snapshot_capture - Take a quick snapshot of the GuC Engine.
- * @e: Xe Engine.
+ * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
+ * @q: Xe exec queue.
  *
  * This can be printed out in a later stage like during dev_coredump
  * analysis.
  *
  * Returns: a GuC Submit Engine snapshot object that must be freed by the
- * caller, using `xe_guc_engine_snapshot_free`.
+ * caller, using `xe_guc_exec_queue_snapshot_free`.
  */
-struct xe_guc_submit_engine_snapshot *
-xe_guc_engine_snapshot_capture(struct xe_engine *e)
+struct xe_guc_submit_exec_queue_snapshot *
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
 {
-       struct xe_guc *guc = engine_to_guc(e);
+       struct xe_guc *guc = exec_queue_to_guc(q);
        struct xe_device *xe = guc_to_xe(guc);
-       struct xe_gpu_scheduler *sched = &e->guc->sched;
+       struct xe_gpu_scheduler *sched = &q->guc->sched;
        struct xe_sched_job *job;
-       struct xe_guc_submit_engine_snapshot *snapshot;
+       struct xe_guc_submit_exec_queue_snapshot *snapshot;
        int i;
 
        snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
@@ -1740,25 +1740,25 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
                return NULL;
        }
 
-       snapshot->guc.id = e->guc->id;
-       memcpy(&snapshot->name, &e->name, sizeof(snapshot->name));
-       snapshot->class = e->class;
-       snapshot->logical_mask = e->logical_mask;
-       snapshot->width = e->width;
-       snapshot->refcount = kref_read(&e->refcount);
+       snapshot->guc.id = q->guc->id;
+       memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
+       snapshot->class = q->class;
+       snapshot->logical_mask = q->logical_mask;
+       snapshot->width = q->width;
+       snapshot->refcount = kref_read(&q->refcount);
        snapshot->sched_timeout = sched->base.timeout;
-       snapshot->sched_props.timeslice_us = e->sched_props.timeslice_us;
+       snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
        snapshot->sched_props.preempt_timeout_us =
-               e->sched_props.preempt_timeout_us;
+               q->sched_props.preempt_timeout_us;
 
-       snapshot->lrc = kmalloc_array(e->width, sizeof(struct lrc_snapshot),
+       snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
                                      GFP_ATOMIC);
 
        if (!snapshot->lrc) {
                drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n");
        } else {
-               for (i = 0; i < e->width; ++i) {
-                       struct xe_lrc *lrc = e->lrc + i;
+               for (i = 0; i < q->width; ++i) {
+                       struct xe_lrc *lrc = q->lrc + i;
 
                        snapshot->lrc[i].context_desc =
                                lower_32_bits(xe_lrc_ggtt_addr(lrc));
@@ -1771,12 +1771,12 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
                }
        }
 
-       snapshot->schedule_state = atomic_read(&e->guc->state);
-       snapshot->engine_flags = e->flags;
+       snapshot->schedule_state = atomic_read(&q->guc->state);
+       snapshot->exec_queue_flags = q->flags;
 
-       snapshot->parallel_execution = xe_engine_is_parallel(e);
+       snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
        if (snapshot->parallel_execution)
-               guc_engine_wq_snapshot_capture(e, snapshot);
+               guc_exec_queue_wq_snapshot_capture(q, snapshot);
 
        spin_lock(&sched->base.job_list_lock);
        snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
@@ -1806,15 +1806,15 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
 }
 
 /**
- * xe_guc_engine_snapshot_print - Print out a given GuC Engine snapshot.
+ * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
  * @snapshot: GuC Submit Engine snapshot object.
  * @p: drm_printer where it will be printed out.
  *
  * This function prints out a given GuC Submit Engine snapshot object.
  */
 void
-xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
-                            struct drm_printer *p)
+xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+                                struct drm_printer *p)
 {
        int i;
 
@@ -1846,10 +1846,10 @@ xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
                drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
        }
        drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
-       drm_printf(p, "\tFlags: 0x%lx\n", snapshot->engine_flags);
+       drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
 
        if (snapshot->parallel_execution)
-               guc_engine_wq_snapshot_print(snapshot, p);
+               guc_exec_queue_wq_snapshot_print(snapshot, p);
 
        for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
             i++)
@@ -1860,14 +1860,14 @@ xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
 }
 
 /**
- * xe_guc_engine_snapshot_free - Free all allocated objects for a given
+ * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
  * snapshot.
  * @snapshot: GuC Submit Engine snapshot object.
  *
  * This function free all the memory that needed to be allocated at capture
  * time.
  */
-void xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot)
+void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
 {
        if (!snapshot)
                return;
@@ -1877,13 +1877,13 @@ void xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot)
        kfree(snapshot);
 }
 
-static void guc_engine_print(struct xe_engine *e, struct drm_printer *p)
+static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
 {
-       struct xe_guc_submit_engine_snapshot *snapshot;
+       struct xe_guc_submit_exec_queue_snapshot *snapshot;
 
-       snapshot = xe_guc_engine_snapshot_capture(e);
-       xe_guc_engine_snapshot_print(snapshot, p);
-       xe_guc_engine_snapshot_free(snapshot);
+       snapshot = xe_guc_exec_queue_snapshot_capture(q);
+       xe_guc_exec_queue_snapshot_print(snapshot, p);
+       xe_guc_exec_queue_snapshot_free(snapshot);
 }
 
 /**
@@ -1895,14 +1895,14 @@ static void guc_engine_print(struct xe_engine *e, struct drm_printer *p)
  */
 void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        unsigned long index;
 
        if (!xe_device_guc_submission_enabled(guc_to_xe(guc)))
                return;
 
        mutex_lock(&guc->submission_state.lock);
-       xa_for_each(&guc->submission_state.engine_lookup, index, e)
-               guc_engine_print(e, p);
+       xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+               guc_exec_queue_print(q, p);
        mutex_unlock(&guc->submission_state.lock);
 }
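
The devcoredump path keeps the same capture/print/free life cycle after the
rename; only the identifiers change. A minimal illustrative sketch of a debug
helper built on the three functions above (the helper name and the drm_printer
argument are assumptions, not part of this patch):

	static void dump_exec_queue(struct xe_exec_queue *q, struct drm_printer *p)
	{
		struct xe_guc_submit_exec_queue_snapshot *snapshot;

		/* Capture allocates with GFP_ATOMIC and returns NULL on failure. */
		snapshot = xe_guc_exec_queue_snapshot_capture(q);
		if (!snapshot)
			return;

		xe_guc_exec_queue_snapshot_print(snapshot, p);
		xe_guc_exec_queue_snapshot_free(snapshot);
	}
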
index 4153c2d2201307d4be68cf698a655e2a7b076a27..fc97869c5b865d59d998f0adc55d004655fa269f 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 
 struct drm_printer;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_guc;
 
 int xe_guc_submit_init(struct xe_guc *guc);
@@ -21,18 +21,18 @@ int xe_guc_submit_start(struct xe_guc *guc);
 
 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
-                                          u32 len);
-int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
+                                              u32 len);
+int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
-struct xe_guc_submit_engine_snapshot *
-xe_guc_engine_snapshot_capture(struct xe_engine *e);
+struct xe_guc_submit_exec_queue_snapshot *
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
 void
-xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
-                            struct drm_printer *p);
+xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+                                struct drm_printer *p);
 void
-xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot);
+xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
 void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
 
 #endif
index 6765b2c6eab15b1e97e62d78e2e0b7f2f52ceb58..649b0a85269219405f1a66a5b11f96d1de88f72a 100644 (file)
@@ -79,20 +79,20 @@ struct pending_list_snapshot {
 };
 
 /**
- * struct xe_guc_submit_engine_snapshot - Snapshot for devcoredump
+ * struct xe_guc_submit_exec_queue_snapshot - Snapshot for devcoredump
  */
-struct xe_guc_submit_engine_snapshot {
-       /** @name: name of this engine */
+struct xe_guc_submit_exec_queue_snapshot {
+       /** @name: name of this exec queue */
        char name[MAX_FENCE_NAME_LEN];
-       /** @class: class of this engine */
+       /** @class: class of this exec queue */
        enum xe_engine_class class;
        /**
-        * @logical_mask: logical mask of where job submitted to engine can run
+        * @logical_mask: logical mask of where job submitted to exec queue can run
         */
        u32 logical_mask;
-       /** @width: width (number BB submitted per exec) of this engine */
+       /** @width: width (number BB submitted per exec) of this exec queue */
        u16 width;
-       /** @refcount: ref count of this engine */
+       /** @refcount: ref count of this exec queue */
        u32 refcount;
        /**
         * @sched_timeout: the time after which a job is removed from the
@@ -113,8 +113,8 @@ struct xe_guc_submit_engine_snapshot {
 
        /** @schedule_state: Schedule State at the moment of Crash */
        u32 schedule_state;
-       /** @engine_flags: Flags of the faulty engine */
-       unsigned long engine_flags;
+       /** @exec_queue_flags: Flags of the faulty exec_queue */
+       unsigned long exec_queue_flags;
 
        /** @guc: GuC Engine Snapshot */
        struct {
@@ -122,7 +122,7 @@ struct xe_guc_submit_engine_snapshot {
                u32 wqi_head;
                /** @wqi_tail: work queue item tail */
                u32 wqi_tail;
-               /** @id: GuC id for this xe_engine */
+               /** @id: GuC id for this exec_queue */
                u16 id;
        } guc;
 
index a304dce4e9f4dd5a7298e1cfa32ef2d0641c3a0a..a5e58917a4994bcd7f354a74c2d9daee3b3f5039 100644 (file)
@@ -33,8 +33,8 @@ struct xe_guc {
        struct xe_guc_pc pc;
        /** @submission_state: GuC submission state */
        struct {
-               /** @engine_lookup: Lookup an xe_engine from guc_id */
-               struct xarray engine_lookup;
+               /** @exec_queue_lookup: Lookup an exec queue from guc_id */
+               struct xarray exec_queue_lookup;
                /** @guc_ids: used to allocate new guc_ids, single-lrc */
                struct ida guc_ids;
                /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
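
With the lookup xarray renamed, a guc_id now resolves directly to an exec
queue. A hedged sketch of such a lookup, mirroring g2h_exec_queue_lookup() in
xe_guc_submit.c (the helper itself is hypothetical):

	static struct xe_exec_queue *lookup_exec_queue(struct xe_guc *guc, u32 guc_id)
	{
		/* Reject ids outside the GuC id space, as the G2H handlers do. */
		if (guc_id >= GUC_ID_MAX)
			return NULL;

		return xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
	}
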
index 05f3d8d6837932b07a522983591c8a83b6d4c29b..09db8da261a399d9a6aabf5503f39884a9fc09ef 100644 (file)
@@ -12,7 +12,7 @@
 #include "regs/xe_regs.h"
 #include "xe_bo.h"
 #include "xe_device.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_hw_fence.h"
 #include "xe_map.h"
@@ -604,7 +604,7 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
 #define ACC_NOTIFY_S            16
 
 int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
-               struct xe_engine *e, struct xe_vm *vm, u32 ring_size)
+               struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
 {
        struct xe_gt *gt = hwe->gt;
        struct xe_tile *tile = gt_to_tile(gt);
@@ -669,12 +669,12 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
                             RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
        if (xe->info.has_asid && vm)
                xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
-                                    (e->usm.acc_granularity <<
+                                    (q->usm.acc_granularity <<
                                      ACC_GRANULARITY_S) | vm->usm.asid);
        if (xe->info.supports_usm && vm)
                xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
-                                    (e->usm.acc_notify << ACC_NOTIFY_S) |
-                                    e->usm.acc_trigger);
+                                    (q->usm.acc_notify << ACC_NOTIFY_S) |
+                                    q->usm.acc_trigger);
 
        lrc->desc = GEN8_CTX_VALID;
        lrc->desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
index e37f89e75ef80b1956667346ced44c5348d7c94f..3a6e8fc5a8370232aa9e9f87c5c1962804d9cdc2 100644 (file)
@@ -8,7 +8,7 @@
 #include "xe_lrc_types.h"
 
 struct xe_device;
-struct xe_engine;
+struct xe_exec_queue;
 enum xe_engine_class;
 struct xe_hw_engine;
 struct xe_vm;
@@ -16,7 +16,7 @@ struct xe_vm;
 #define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
 
 int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
-               struct xe_engine *e, struct xe_vm *vm, u32 ring_size);
+               struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size);
 void xe_lrc_finish(struct xe_lrc *lrc);
 
 size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class);
index 60f7226c92ff3c9375d6042d210173a09996d3e1..d0816d2090f076f472823365ad8991b95d024c97 100644 (file)
@@ -34,8 +34,8 @@
  * struct xe_migrate - migrate context.
  */
 struct xe_migrate {
-       /** @eng: Default engine used for migration */
-       struct xe_engine *eng;
+       /** @q: Default exec queue used for migration */
+       struct xe_exec_queue *q;
        /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
        struct xe_tile *tile;
-       /** @job_mutex: Timeline mutex for @eng. */
+       /** @job_mutex: Timeline mutex for @q. */
@@ -78,9 +78,9 @@ struct xe_migrate {
  *
  * Return: The default migrate engine
  */
-struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
 {
-       return tile->migrate->eng;
+       return tile->migrate->q;
 }
 
 static void xe_migrate_fini(struct drm_device *dev, void *arg)
@@ -88,11 +88,11 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
        struct xe_migrate *m = arg;
        struct ww_acquire_ctx ww;
 
-       xe_vm_lock(m->eng->vm, &ww, 0, false);
+       xe_vm_lock(m->q->vm, &ww, 0, false);
        xe_bo_unpin(m->pt_bo);
        if (m->cleared_bo)
                xe_bo_unpin(m->cleared_bo);
-       xe_vm_unlock(m->eng->vm, &ww);
+       xe_vm_unlock(m->q->vm, &ww);
 
        dma_fence_put(m->fence);
        if (m->cleared_bo)
@@ -100,8 +100,8 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
        xe_bo_put(m->pt_bo);
        drm_suballoc_manager_fini(&m->vm_update_sa);
        mutex_destroy(&m->job_mutex);
-       xe_vm_close_and_put(m->eng->vm);
-       xe_engine_put(m->eng);
+       xe_vm_close_and_put(m->q->vm);
+       xe_exec_queue_put(m->q);
 }
 
 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
@@ -341,20 +341,20 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
                if (!hwe)
                        return ERR_PTR(-EINVAL);
 
-               m->eng = xe_engine_create(xe, vm,
-                                         BIT(hwe->logical_instance), 1,
-                                         hwe, ENGINE_FLAG_KERNEL);
+               m->q = xe_exec_queue_create(xe, vm,
+                                           BIT(hwe->logical_instance), 1,
+                                           hwe, EXEC_QUEUE_FLAG_KERNEL);
        } else {
-               m->eng = xe_engine_create_class(xe, primary_gt, vm,
-                                               XE_ENGINE_CLASS_COPY,
-                                               ENGINE_FLAG_KERNEL);
+               m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
+                                                 XE_ENGINE_CLASS_COPY,
+                                                 EXEC_QUEUE_FLAG_KERNEL);
        }
-       if (IS_ERR(m->eng)) {
+       if (IS_ERR(m->q)) {
                xe_vm_close_and_put(vm);
-               return ERR_CAST(m->eng);
+               return ERR_CAST(m->q);
        }
        if (xe->info.supports_usm)
-               m->eng->priority = XE_ENGINE_PRIORITY_KERNEL;
+               m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 
        mutex_init(&m->job_mutex);
 
@@ -456,7 +456,7 @@ static void emit_pte(struct xe_migrate *m,
                        addr = xe_res_dma(cur) & PAGE_MASK;
                        if (is_vram) {
                                /* Is this a 64K PTE entry? */
-                               if ((m->eng->vm->flags & XE_VM_FLAG_64K) &&
+                               if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
                                    !(cur_ofs & (16 * 8 - 1))) {
                                        XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
                                        addr |= XE_PTE_PS64;
@@ -714,7 +714,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                                  src_L0, ccs_ofs, copy_ccs);
 
                mutex_lock(&m->job_mutex);
-               job = xe_bb_create_migration_job(m->eng, bb,
+               job = xe_bb_create_migration_job(m->q, bb,
                                                 xe_migrate_batch_base(m, usm),
                                                 update_idx);
                if (IS_ERR(job)) {
@@ -938,7 +938,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                }
 
                mutex_lock(&m->job_mutex);
-               job = xe_bb_create_migration_job(m->eng, bb,
+               job = xe_bb_create_migration_job(m->q, bb,
                                                 xe_migrate_batch_base(m, usm),
                                                 update_idx);
                if (IS_ERR(job)) {
@@ -1024,7 +1024,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
 
 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
 {
-       return xe_vm_get(m->eng->vm);
+       return xe_vm_get(m->q->vm);
 }
 
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
@@ -1106,7 +1106,7 @@ static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
  * @m: The migrate context.
  * @vm: The vm we'll be updating.
  * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @eng: The engine to be used for the update or NULL if the default
+ * @q: The exec queue to be used for the update or NULL if the default
  * migration engine is to be used.
  * @updates: An array of update descriptors.
  * @num_updates: Number of descriptors in @updates.
@@ -1132,7 +1132,7 @@ struct dma_fence *
 xe_migrate_update_pgtables(struct xe_migrate *m,
                           struct xe_vm *vm,
                           struct xe_bo *bo,
-                          struct xe_engine *eng,
+                          struct xe_exec_queue *q,
                           const struct xe_vm_pgtable_update *updates,
                           u32 num_updates,
                           struct xe_sync_entry *syncs, u32 num_syncs,
@@ -1150,13 +1150,13 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
        u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
        u64 addr;
        int err = 0;
-       bool usm = !eng && xe->info.supports_usm;
+       bool usm = !q && xe->info.supports_usm;
        bool first_munmap_rebind = vma &&
                vma->gpuva.flags & XE_VMA_FIRST_REBIND;
-       struct xe_engine *eng_override = !eng ? m->eng : eng;
+       struct xe_exec_queue *q_override = !q ? m->q : q;
 
        /* Use the CPU if no in syncs and engine is idle */
-       if (no_in_syncs(syncs, num_syncs) && xe_engine_is_idle(eng_override)) {
+       if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
                fence =  xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
                                                        num_updates,
                                                        first_munmap_rebind,
@@ -1186,14 +1186,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
         */
        XE_WARN_ON(batch_size >= SZ_128K);
 
-       bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
+       bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
        if (IS_ERR(bb))
                return ERR_CAST(bb);
 
        /* For sysmem PTE's, need to map them in our hole.. */
        if (!IS_DGFX(xe)) {
                ppgtt_ofs = NUM_KERNEL_PDE - 1;
-               if (eng) {
+               if (q) {
                        XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
 
                        sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
@@ -1249,10 +1249,10 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
                        write_pgtable(tile, bb, 0, &updates[i], pt_update);
        }
 
-       if (!eng)
+       if (!q)
                mutex_lock(&m->job_mutex);
 
-       job = xe_bb_create_migration_job(eng ?: m->eng, bb,
+       job = xe_bb_create_migration_job(q ?: m->q, bb,
                                         xe_migrate_batch_base(m, usm),
                                         update_idx);
        if (IS_ERR(job)) {
@@ -1295,7 +1295,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
        fence = dma_fence_get(&job->drm.s_fence->finished);
        xe_sched_job_push(job);
 
-       if (!eng)
+       if (!q)
                mutex_unlock(&m->job_mutex);
 
        xe_bb_free(bb, fence);
@@ -1306,7 +1306,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 err_job:
        xe_sched_job_put(job);
 err_bb:
-       if (!eng)
+       if (!q)
                mutex_unlock(&m->job_mutex);
        xe_bb_free(bb, NULL);
 err:
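
As the kernel-doc above spells out, @q stays optional: passing NULL selects the
default migration exec queue (m->q) and serializes the job under m->job_mutex,
exactly as the !q branches show. A hedged sketch of a page-table update on the
default queue (vm, updates, num_updates, syncs, num_syncs and pt_update are
assumed to be prepared by the caller):

	struct dma_fence *fence;

	fence = xe_migrate_update_pgtables(tile->migrate, vm,
					   NULL /* bo, as in the unbind path */,
					   NULL /* q: fall back to m->q */,
					   updates, num_updates,
					   syncs, num_syncs, pt_update);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
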
index 0d62aff6421c5b07983f3fb19e58fc19aafbcde4..c729241776adfbe522bb7355a44c1c81dd388708 100644 (file)
@@ -14,7 +14,7 @@ struct ttm_resource;
 
 struct xe_bo;
 struct xe_gt;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_migrate;
 struct xe_migrate_pt_update;
 struct xe_sync_entry;
@@ -97,7 +97,7 @@ struct dma_fence *
 xe_migrate_update_pgtables(struct xe_migrate *m,
                           struct xe_vm *vm,
                           struct xe_bo *bo,
-                          struct xe_engine *eng,
+                          struct xe_exec_queue *q,
                           const struct xe_vm_pgtable_update *updates,
                           u32 num_updates,
                           struct xe_sync_entry *syncs, u32 num_syncs,
@@ -105,5 +105,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 
 void xe_migrate_wait(struct xe_migrate *m);
 
-struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);
+struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
 #endif
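
One naming seam is still visible in this header: the accessor keeps its
engine-based name while its return type moves to the renamed struct. A one-line
sketch (the tile pointer is assumed):

	struct xe_exec_queue *q = xe_tile_migrate_engine(tile);
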
index 25f7b35a76dafebd144305e9e1d8a548f55fcb78..d0f1ec4b0336a895bcbc527e8bc51657f89e0a85 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <linux/types.h>
 
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_gt;
 
 void xe_mocs_init_early(struct xe_gt *gt);
index e86604e0174da9ae2cb2b1e9624e7aaf7a248bf5..7bce2a332603c086bf4bed63c212cdff311f6bbf 100644 (file)
@@ -15,19 +15,19 @@ static void preempt_fence_work_func(struct work_struct *w)
        bool cookie = dma_fence_begin_signalling();
        struct xe_preempt_fence *pfence =
                container_of(w, typeof(*pfence), preempt_work);
-       struct xe_engine *e = pfence->engine;
+       struct xe_exec_queue *q = pfence->q;
 
        if (pfence->error)
                dma_fence_set_error(&pfence->base, pfence->error);
        else
-               e->ops->suspend_wait(e);
+               q->ops->suspend_wait(q);
 
        dma_fence_signal(&pfence->base);
        dma_fence_end_signalling(cookie);
 
-       xe_vm_queue_rebind_worker(e->vm);
+       xe_vm_queue_rebind_worker(q->vm);
 
-       xe_engine_put(e);
+       xe_exec_queue_put(q);
 }
 
 static const char *
@@ -46,9 +46,9 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
 {
        struct xe_preempt_fence *pfence =
                container_of(fence, typeof(*pfence), base);
-       struct xe_engine *e = pfence->engine;
+       struct xe_exec_queue *q = pfence->q;
 
-       pfence->error = e->ops->suspend(e);
+       pfence->error = q->ops->suspend(q);
        queue_work(system_unbound_wq, &pfence->preempt_work);
        return true;
 }
@@ -104,43 +104,43 @@ void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
  * xe_preempt_fence_alloc().
  * @pfence: The struct xe_preempt_fence pointer returned from
  *          xe_preempt_fence_alloc().
- * @e: The struct xe_engine used for arming.
+ * @q: The struct xe_exec_queue used for arming.
  * @context: The dma-fence context used for arming.
  * @seqno: The dma-fence seqno used for arming.
  *
  * Inserts the preempt fence into @context's timeline, takes @link off any
- * list, and registers the struct xe_engine as the xe_engine to be preempted.
+ * list, and registers the struct xe_exec_queue as the exec queue to be preempted.
  *
  * Return: A pointer to a struct dma_fence embedded into the preempt fence.
  * This function doesn't error.
  */
 struct dma_fence *
-xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
+xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
                     u64 context, u32 seqno)
 {
        list_del_init(&pfence->link);
-       pfence->engine = xe_engine_get(e);
+       pfence->q = xe_exec_queue_get(q);
        dma_fence_init(&pfence->base, &preempt_fence_ops,
-                     &e->compute.lock, context, seqno);
+                     &q->compute.lock, context, seqno);
 
        return &pfence->base;
 }
 
 /**
  * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
- * @e: The struct xe_engine used for arming.
+ * @q: The struct xe_exec_queue used for arming.
  * @context: The dma-fence context used for arming.
  * @seqno: The dma-fence seqno used for arming.
  *
  * Allocates and inserts the preempt fence into @context's timeline,
- * and registers @e as the struct xe_engine to be preempted.
+ * and registers @q as the struct xe_exec_queue to be preempted.
  *
  * Return: A pointer to the resulting struct dma_fence on success. An error
  * pointer on error. In particular if allocation fails it returns
  * ERR_PTR(-ENOMEM);
  */
 struct dma_fence *
-xe_preempt_fence_create(struct xe_engine *e,
+xe_preempt_fence_create(struct xe_exec_queue *q,
                        u64 context, u32 seqno)
 {
        struct xe_preempt_fence *pfence;
@@ -149,7 +149,7 @@ xe_preempt_fence_create(struct xe_engine *e,
        if (IS_ERR(pfence))
                return ERR_CAST(pfence);
 
-       return xe_preempt_fence_arm(pfence, e, context, seqno);
+       return xe_preempt_fence_arm(pfence, q, context, seqno);
 }
 
 bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
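
The kernel-doc above describes two ways to build a preempt fence: the split
xe_preempt_fence_alloc() + xe_preempt_fence_arm() pair, or the combined
xe_preempt_fence_create() helper, which can fail with ERR_PTR(-ENOMEM). A
hedged sketch of the combined path (q, context and seqno are assumed to come
from the caller):

	struct dma_fence *fence;

	fence = xe_preempt_fence_create(q, context, seqno);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Once signalling is enabled, the fence suspends q via q->ops->suspend(). */
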
index 4f3966103203c7bef0778e50122a11582f089e1b..9406c6fea525a2e5272b9dcbc8baf0a13bbb45da 100644 (file)
@@ -11,7 +11,7 @@
 struct list_head;
 
 struct dma_fence *
-xe_preempt_fence_create(struct xe_engine *e,
+xe_preempt_fence_create(struct xe_exec_queue *q,
                        u64 context, u32 seqno);
 
 struct xe_preempt_fence *xe_preempt_fence_alloc(void);
@@ -19,7 +19,7 @@ struct xe_preempt_fence *xe_preempt_fence_alloc(void);
 void xe_preempt_fence_free(struct xe_preempt_fence *pfence);
 
 struct dma_fence *
-xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
+xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
                     u64 context, u32 seqno);
 
 static inline struct xe_preempt_fence *
index 9d9efd8ff0ed2bd1964545716b878a9765f66486..b54b5c29b5331e7a0c445abbc10551afa76525fd 100644 (file)
@@ -9,12 +9,11 @@
 #include <linux/dma-fence.h>
 #include <linux/workqueue.h>
 
-struct xe_engine;
+struct xe_exec_queue;
 
 /**
  * struct xe_preempt_fence - XE preempt fence
  *
- * A preemption fence which suspends the execution of an xe_engine on the
  * hardware and triggers a callback once the xe_engine is complete.
  */
 struct xe_preempt_fence {
@@ -22,8 +21,8 @@ struct xe_preempt_fence {
        struct dma_fence base;
        /** @link: link into list of pending preempt fences */
        struct list_head link;
-       /** @engine: xe engine for this preempt fence */
-       struct xe_engine *engine;
+       /** @q: exec queue for this preempt fence */
+       struct xe_exec_queue *q;
        /** @preempt_work: work struct which issues preemption */
        struct work_struct preempt_work;
        /** @error: preempt fence is in error state */
index b82ce01cc4cb986289e1144df0ff1d2a351548d4..c21d2681b4196ea9c6957824bc5bc4e7171b6b6a 100644 (file)
@@ -1307,7 +1307,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
  * address range.
  * @tile: The tile to bind for.
  * @vma: The vma to bind.
- * @e: The engine with which to do pipelined page-table updates.
+ * @q: The exec_queue with which to do pipelined page-table updates.
  * @syncs: Entries to sync on before binding the built tree to the live vm tree.
  * @num_syncs: Number of @sync entries.
  * @rebind: Whether we're rebinding this vma to the same address range without
@@ -1325,7 +1325,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
  * on success, an error pointer on error.
  */
 struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
+__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool rebind)
 {
@@ -1351,7 +1351,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
 
        vm_dbg(&xe_vma_vm(vma)->xe->drm,
               "Preparing bind, with range [%llx...%llx) engine %p.\n",
-              xe_vma_start(vma), xe_vma_end(vma) - 1, e);
+              xe_vma_start(vma), xe_vma_end(vma) - 1, q);
 
        err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
        if (err)
@@ -1388,7 +1388,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
        }
 
        fence = xe_migrate_update_pgtables(tile->migrate,
-                                          vm, xe_vma_bo(vma), e,
+                                          vm, xe_vma_bo(vma), q,
                                           entries, num_entries,
                                           syncs, num_syncs,
                                           &bind_pt_update.base);
@@ -1663,7 +1663,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
  * address range.
  * @tile: The tile to unbind for.
  * @vma: The vma to unbind.
- * @e: The engine with which to do pipelined page-table updates.
+ * @q: The exec_queue with which to do pipelined page-table updates.
  * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
  * @num_syncs: Number of @sync entries.
  *
@@ -1679,7 +1679,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
  * on success, an error pointer on error.
  */
 struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
+__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
                   struct xe_sync_entry *syncs, u32 num_syncs)
 {
        struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
@@ -1704,7 +1704,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
 
        vm_dbg(&xe_vma_vm(vma)->xe->drm,
               "Preparing unbind, with range [%llx...%llx) engine %p.\n",
-              xe_vma_start(vma), xe_vma_end(vma) - 1, e);
+              xe_vma_start(vma), xe_vma_end(vma) - 1, q);
 
        num_entries = xe_pt_stage_unbind(tile, vma, entries);
        XE_WARN_ON(num_entries > ARRAY_SIZE(entries));
@@ -1729,8 +1729,8 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
         * lower level, because it needs to be more conservative.
         */
        fence = xe_migrate_update_pgtables(tile->migrate,
-                                          vm, NULL, e ? e :
-                                          vm->eng[tile->id],
+                                          vm, NULL, q ? q :
+                                          vm->q[tile->id],
                                           entries, num_entries,
                                           syncs, num_syncs,
                                           &unbind_pt_update.base);
index bbb00d6461ffd48955ca93d610c88a1056cc807f..01be7ab08f87ec1c61bb19f875ccf26b8b9e677a 100644 (file)
@@ -12,7 +12,7 @@
 struct dma_fence;
 struct xe_bo;
 struct xe_device;
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_sync_entry;
 struct xe_tile;
 struct xe_vm;
@@ -35,12 +35,12 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
 
 struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
+__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool rebind);
 
 struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
+__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
                   struct xe_sync_entry *syncs, u32 num_syncs);
 
 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
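
The page-table helpers follow the same convention as the migrate layer: a NULL
exec queue falls back to a default, here the VM's per-tile queue as seen in
__xe_pt_unbind_vma(). A minimal sketch (tile, vma, syncs and num_syncs are
assumed caller state):

	struct dma_fence *fence;

	fence = __xe_pt_unbind_vma(tile, vma, NULL /* q: use vm->q[tile->id] */,
				   syncs, num_syncs);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
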
index 382851f436b79b86f39f59e5800aa779a0a2d9c7..7ea235c71385f4fd3cdbf31e585408ff582999b7 100644 (file)
@@ -203,7 +203,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
        config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
                hweight_long(xe->info.mem_region_mask);
        config->info[XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY] =
-               xe_engine_device_get_max_priority(xe);
+               xe_exec_queue_device_get_max_priority(xe);
 
        if (copy_to_user(query_ptr, config, size)) {
                kfree(config);
index 2d0d392cd691601ad2a2349c2b14e827a93147bf..6346ed24e2793d7844f7521d14686f86e48eebac 100644 (file)
@@ -10,7 +10,7 @@
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_lrc_layout.h"
 #include "regs/xe_regs.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gt.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
@@ -156,7 +156,7 @@ static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
 
 static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
 {
-       struct xe_gt *gt = job->engine->gt;
+       struct xe_gt *gt = job->q->gt;
        bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
        u32 flags;
 
@@ -172,7 +172,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
 
        if (lacks_render)
                flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
-       else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
+       else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
                flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
        dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
@@ -202,7 +202,7 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
 
 static u32 get_ppgtt_flag(struct xe_sched_job *job)
 {
-       return !(job->engine->flags & ENGINE_FLAG_WA) ? BIT(8) : 0;
+       return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
 }
 
 static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
@@ -210,7 +210,7 @@ static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
 {
        u32 dw[MAX_JOB_SIZE_DW], i = 0;
        u32 ppgtt_flag = get_ppgtt_flag(job);
-       struct xe_vm *vm = job->engine->vm;
+       struct xe_vm *vm = job->q->vm;
 
        if (vm->batch_invalidate_tlb) {
                dw[i++] = preparser_disable(true);
@@ -255,10 +255,10 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
 {
        u32 dw[MAX_JOB_SIZE_DW], i = 0;
        u32 ppgtt_flag = get_ppgtt_flag(job);
-       struct xe_gt *gt = job->engine->gt;
+       struct xe_gt *gt = job->q->gt;
        struct xe_device *xe = gt_to_xe(gt);
-       bool decode = job->engine->class == XE_ENGINE_CLASS_VIDEO_DECODE;
-       struct xe_vm *vm = job->engine->vm;
+       bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+       struct xe_vm *vm = job->q->vm;
 
        dw[i++] = preparser_disable(true);
 
@@ -302,16 +302,16 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
 {
        u32 dw[MAX_JOB_SIZE_DW], i = 0;
        u32 ppgtt_flag = get_ppgtt_flag(job);
-       struct xe_gt *gt = job->engine->gt;
+       struct xe_gt *gt = job->q->gt;
        struct xe_device *xe = gt_to_xe(gt);
        bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
-       struct xe_vm *vm = job->engine->vm;
+       struct xe_vm *vm = job->q->vm;
        u32 mask_flags = 0;
 
        dw[i++] = preparser_disable(true);
        if (lacks_render)
                mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
-       else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
+       else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
                mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
 
        /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
@@ -378,14 +378,14 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
 {
        int i;
 
-       if (xe_sched_job_is_migration(job->engine)) {
-               emit_migration_job_gen12(job, job->engine->lrc,
+       if (xe_sched_job_is_migration(job->q)) {
+               emit_migration_job_gen12(job, job->q->lrc,
                                         xe_sched_job_seqno(job));
                return;
        }
 
-       for (i = 0; i < job->engine->width; ++i)
-               __emit_job_gen12_copy(job, job->engine->lrc + i,
+       for (i = 0; i < job->q->width; ++i)
+               __emit_job_gen12_copy(job, job->q->lrc + i,
                                      job->batch_addr[i],
                                      xe_sched_job_seqno(job));
 }
@@ -395,8 +395,8 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
        int i;
 
        /* FIXME: Not doing parallel handshake for now */
-       for (i = 0; i < job->engine->width; ++i)
-               __emit_job_gen12_video(job, job->engine->lrc + i,
+       for (i = 0; i < job->q->width; ++i)
+               __emit_job_gen12_video(job, job->q->lrc + i,
                                       job->batch_addr[i],
                                       xe_sched_job_seqno(job));
 }
@@ -405,8 +405,8 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
 {
        int i;
 
-       for (i = 0; i < job->engine->width; ++i)
-               __emit_job_gen12_render_compute(job, job->engine->lrc + i,
+       for (i = 0; i < job->q->width; ++i)
+               __emit_job_gen12_render_compute(job, job->q->lrc + i,
                                                job->batch_addr[i],
                                                xe_sched_job_seqno(job));
 }
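All of the emit_job_gen12_* helpers reduce to the same post-rename loop: one emission per logical ring context, bounded by the queue's submission width. A stripped-down sketch (hypothetical function name, migration special case omitted):

static void emit_job_gen12_example(struct xe_sched_job *job)
{
	int i;

	/* One batch buffer address per LRC of the exec queue. */
	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_copy(job, job->q->lrc + i,
				      job->batch_addr[i],
				      xe_sched_job_seqno(job));
}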
index 9944858de4d2d9306321dea18d4905c747c4ee66..de2851d24c968e2ccabbccad0e32e90c7bc5cfaf 100644 (file)
@@ -57,58 +57,58 @@ static struct xe_sched_job *job_alloc(bool parallel)
                                 xe_sched_job_slab, GFP_KERNEL);
 }
 
-bool xe_sched_job_is_migration(struct xe_engine *e)
+bool xe_sched_job_is_migration(struct xe_exec_queue *q)
 {
-       return e->vm && (e->vm->flags & XE_VM_FLAG_MIGRATION) &&
-               !(e->flags & ENGINE_FLAG_WA);
+       return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
+               !(q->flags & EXEC_QUEUE_FLAG_WA);
 }
 
 static void job_free(struct xe_sched_job *job)
 {
-       struct xe_engine *e = job->engine;
-       bool is_migration = xe_sched_job_is_migration(e);
+       struct xe_exec_queue *q = job->q;
+       bool is_migration = xe_sched_job_is_migration(q);
 
-       kmem_cache_free(xe_engine_is_parallel(job->engine) || is_migration ?
+       kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
                        xe_sched_job_parallel_slab : xe_sched_job_slab, job);
 }
 
 static struct xe_device *job_to_xe(struct xe_sched_job *job)
 {
-       return gt_to_xe(job->engine->gt);
+       return gt_to_xe(job->q->gt);
 }
 
-struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
+struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
                                         u64 *batch_addr)
 {
        struct xe_sched_job *job;
        struct dma_fence **fences;
-       bool is_migration = xe_sched_job_is_migration(e);
+       bool is_migration = xe_sched_job_is_migration(q);
        int err;
        int i, j;
        u32 width;
 
        /* Migration and kernel engines have their own locking */
-       if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM |
-                         ENGINE_FLAG_WA))) {
-               lockdep_assert_held(&e->vm->lock);
-               if (!xe_vm_no_dma_fences(e->vm))
-                       xe_vm_assert_held(e->vm);
+       if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
+                         EXEC_QUEUE_FLAG_WA))) {
+               lockdep_assert_held(&q->vm->lock);
+               if (!xe_vm_no_dma_fences(q->vm))
+                       xe_vm_assert_held(q->vm);
        }
 
-       job = job_alloc(xe_engine_is_parallel(e) || is_migration);
+       job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
        if (!job)
                return ERR_PTR(-ENOMEM);
 
-       job->engine = e;
+       job->q = q;
        kref_init(&job->refcount);
-       xe_engine_get(job->engine);
+       xe_exec_queue_get(job->q);
 
-       err = drm_sched_job_init(&job->drm, e->entity, 1, NULL);
+       err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
        if (err)
                goto err_free;
 
-       if (!xe_engine_is_parallel(e)) {
-               job->fence = xe_lrc_create_seqno_fence(e->lrc);
+       if (!xe_exec_queue_is_parallel(q)) {
+               job->fence = xe_lrc_create_seqno_fence(q->lrc);
                if (IS_ERR(job->fence)) {
                        err = PTR_ERR(job->fence);
                        goto err_sched_job;
@@ -116,38 +116,38 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
        } else {
                struct dma_fence_array *cf;
 
-               fences = kmalloc_array(e->width, sizeof(*fences), GFP_KERNEL);
+               fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
                if (!fences) {
                        err = -ENOMEM;
                        goto err_sched_job;
                }
 
-               for (j = 0; j < e->width; ++j) {
-                       fences[j] = xe_lrc_create_seqno_fence(e->lrc + j);
+               for (j = 0; j < q->width; ++j) {
+                       fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
                        if (IS_ERR(fences[j])) {
                                err = PTR_ERR(fences[j]);
                                goto err_fences;
                        }
                }
 
-               cf = dma_fence_array_create(e->width, fences,
-                                           e->parallel.composite_fence_ctx,
-                                           e->parallel.composite_fence_seqno++,
+               cf = dma_fence_array_create(q->width, fences,
+                                           q->parallel.composite_fence_ctx,
+                                           q->parallel.composite_fence_seqno++,
                                            false);
                if (!cf) {
-                       --e->parallel.composite_fence_seqno;
+                       --q->parallel.composite_fence_seqno;
                        err = -ENOMEM;
                        goto err_fences;
                }
 
                /* Sanity check */
-               for (j = 0; j < e->width; ++j)
+               for (j = 0; j < q->width; ++j)
                        XE_WARN_ON(cf->base.seqno != fences[j]->seqno);
 
                job->fence = &cf->base;
        }
 
-       width = e->width;
+       width = q->width;
        if (is_migration)
                width = 2;
 
@@ -155,7 +155,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
                job->batch_addr[i] = batch_addr[i];
 
        /* All other jobs require a VM to be open which has a ref */
-       if (unlikely(e->flags & ENGINE_FLAG_KERNEL))
+       if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
                xe_device_mem_access_get(job_to_xe(job));
        xe_device_assert_mem_access(job_to_xe(job));
 
@@ -164,14 +164,14 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
 
 err_fences:
        for (j = j - 1; j >= 0; --j) {
-               --e->lrc[j].fence_ctx.next_seqno;
+               --q->lrc[j].fence_ctx.next_seqno;
                dma_fence_put(fences[j]);
        }
        kfree(fences);
 err_sched_job:
        drm_sched_job_cleanup(&job->drm);
 err_free:
-       xe_engine_put(e);
+       xe_exec_queue_put(q);
        job_free(job);
        return ERR_PTR(err);
 }
@@ -188,9 +188,9 @@ void xe_sched_job_destroy(struct kref *ref)
        struct xe_sched_job *job =
                container_of(ref, struct xe_sched_job, refcount);
 
-       if (unlikely(job->engine->flags & ENGINE_FLAG_KERNEL))
+       if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
                xe_device_mem_access_put(job_to_xe(job));
-       xe_engine_put(job->engine);
+       xe_exec_queue_put(job->q);
        dma_fence_put(job->fence);
        drm_sched_job_cleanup(&job->drm);
        job_free(job);
@@ -222,12 +222,12 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
        trace_xe_sched_job_set_error(job);
 
        dma_fence_enable_sw_signaling(job->fence);
-       xe_hw_fence_irq_run(job->engine->fence_irq);
+       xe_hw_fence_irq_run(job->q->fence_irq);
 }
 
 bool xe_sched_job_started(struct xe_sched_job *job)
 {
-       struct xe_lrc *lrc = job->engine->lrc;
+       struct xe_lrc *lrc = job->q->lrc;
 
        return !__dma_fence_is_later(xe_sched_job_seqno(job),
                                     xe_lrc_start_seqno(lrc),
@@ -236,7 +236,7 @@ bool xe_sched_job_started(struct xe_sched_job *job)
 
 bool xe_sched_job_completed(struct xe_sched_job *job)
 {
-       struct xe_lrc *lrc = job->engine->lrc;
+       struct xe_lrc *lrc = job->q->lrc;
 
        /*
         * Can safely check just LRC[0] seqno as that is last seqno written when
index 5315ad8656c2494d1b7a076b65052f0811f1b1bc..6ca1d426c036f557f538fa78191a2908fb49ee23 100644 (file)
@@ -14,7 +14,7 @@
 int xe_sched_job_module_init(void);
 void xe_sched_job_module_exit(void);
 
-struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
+struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
                                         u64 *batch_addr);
 void xe_sched_job_destroy(struct kref *ref);
 
@@ -71,6 +71,6 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
        job->migrate_flush_flags = flags;
 }
 
-bool xe_sched_job_is_migration(struct xe_engine *e);
+bool xe_sched_job_is_migration(struct xe_exec_queue *q);
 
 #endif
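After the rename the job API hangs everything off job->q. A hypothetical debug helper (not in the patch) shows how the accessors above combine:

static void report_job_state(struct xe_sched_job *job)
{
	struct xe_exec_queue *q = job->q;

	/* started/completed compare the job's seqno against the LRC seqnos. */
	drm_info(&gt_to_xe(q->gt)->drm,
		 "job on %s exec queue (gt%d): started=%d completed=%d\n",
		 xe_exec_queue_is_parallel(q) ? "parallel" : "single-LRC",
		 q->gt->info.id,
		 xe_sched_job_started(job), xe_sched_job_completed(job));
}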
index 5534bfacaa168e2f13609a3f4bcdf3bf1e1f6e86..71213ba9735bc7f5a98d9e3de1d66f38fa97c8a0 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <drm/gpu_scheduler.h>
 
-struct xe_engine;
+struct xe_exec_queue;
 
 /**
  * struct xe_sched_job - XE schedule job (batch buffer tracking)
@@ -18,8 +18,8 @@ struct xe_engine;
 struct xe_sched_job {
        /** @drm: base DRM scheduler job */
        struct drm_sched_job drm;
-       /** @engine: XE submission engine */
-       struct xe_engine *engine;
+       /** @q: Exec queue */
+       struct xe_exec_queue *q;
        /** @refcount: ref count of this job */
        struct kref refcount;
        /**
index 82ca25d8d017f95eb8a85d81943e04f87f244d3b..5ea458dadf6999337feaf3e6f91498a477da1f33 100644 (file)
 #include <linux/types.h>
 
 #include "xe_bo_types.h"
-#include "xe_engine_types.h"
+#include "xe_exec_queue_types.h"
 #include "xe_gpu_scheduler_types.h"
 #include "xe_gt_tlb_invalidation_types.h"
 #include "xe_gt_types.h"
-#include "xe_guc_engine_types.h"
+#include "xe_guc_exec_queue_types.h"
 #include "xe_sched_job.h"
 #include "xe_vm.h"
 
@@ -105,9 +105,9 @@ DEFINE_EVENT(xe_bo, xe_bo_move,
             TP_ARGS(bo)
 );
 
-DECLARE_EVENT_CLASS(xe_engine,
-                   TP_PROTO(struct xe_engine *e),
-                   TP_ARGS(e),
+DECLARE_EVENT_CLASS(xe_exec_queue,
+                   TP_PROTO(struct xe_exec_queue *q),
+                   TP_ARGS(q),
 
                    TP_STRUCT__entry(
                             __field(enum xe_engine_class, class)
@@ -120,13 +120,13 @@ DECLARE_EVENT_CLASS(xe_engine,
                             ),
 
                    TP_fast_assign(
-                          __entry->class = e->class;
-                          __entry->logical_mask = e->logical_mask;
-                          __entry->gt_id = e->gt->info.id;
-                          __entry->width = e->width;
-                          __entry->guc_id = e->guc->id;
-                          __entry->guc_state = atomic_read(&e->guc->state);
-                          __entry->flags = e->flags;
+                          __entry->class = q->class;
+                          __entry->logical_mask = q->logical_mask;
+                          __entry->gt_id = q->gt->info.id;
+                          __entry->width = q->width;
+                          __entry->guc_id = q->guc->id;
+                          __entry->guc_state = atomic_read(&q->guc->state);
+                          __entry->flags = q->flags;
                           ),
 
                    TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
@@ -135,94 +135,94 @@ DECLARE_EVENT_CLASS(xe_engine,
                              __entry->guc_state, __entry->flags)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_create,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_supress_resume,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_submit,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_scheduling_enable,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_scheduling_disable,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_scheduling_done,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_register,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_deregister,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_deregister_done,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_close,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_kill,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_cleanup_entity,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_destroy,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_reset,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_memory_cat_error,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_stop,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_resubmit,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
-DEFINE_EVENT(xe_engine, xe_engine_lr_cleanup,
-            TP_PROTO(struct xe_engine *e),
-            TP_ARGS(e)
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
+            TP_PROTO(struct xe_exec_queue *q),
+            TP_ARGS(q)
 );
 
 DECLARE_EVENT_CLASS(xe_sched_job,
@@ -241,10 +241,10 @@ DECLARE_EVENT_CLASS(xe_sched_job,
 
                    TP_fast_assign(
                           __entry->seqno = xe_sched_job_seqno(job);
-                          __entry->guc_id = job->engine->guc->id;
+                          __entry->guc_id = job->q->guc->id;
                           __entry->guc_state =
-                          atomic_read(&job->engine->guc->state);
-                          __entry->flags = job->engine->flags;
+                          atomic_read(&job->q->guc->state);
+                          __entry->flags = job->q->flags;
                           __entry->error = job->fence->error;
                           __entry->fence = (unsigned long)job->fence;
                           __entry->batch_addr = (u64)job->batch_addr[0];
@@ -303,7 +303,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
                    TP_fast_assign(
                           __entry->opcode = msg->opcode;
                           __entry->guc_id =
-                          ((struct xe_engine *)msg->private_data)->guc->id;
+                          ((struct xe_exec_queue *)msg->private_data)->guc->id;
                           ),
 
                    TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
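With the event class renamed, any future per-queue tracepoint is declared against xe_exec_queue in the same way, for instance (xe_exec_queue_resume is a hypothetical event name, not one added by this patch):

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);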
index d3e82c4aed42bf0db81a3f6a4170fdeada107cd6..374f111eea9c59e65316046e9e738fe096851e4a 100644 (file)
@@ -165,15 +165,15 @@ out:
 
 static bool preempt_fences_waiting(struct xe_vm *vm)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link) {
-               if (!e->compute.pfence || (e->compute.pfence &&
-                   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                            &e->compute.pfence->flags))) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+               if (!q->compute.pfence ||
+                   (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                                                  &q->compute.pfence->flags))) {
                        return true;
                }
        }
@@ -195,10 +195,10 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
        lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       if (*count >= vm->preempt.num_engines)
+       if (*count >= vm->preempt.num_exec_queues)
                return 0;
 
-       for (; *count < vm->preempt.num_engines; ++(*count)) {
+       for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
                struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
 
                if (IS_ERR(pfence))
@@ -212,18 +212,18 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
 
 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link) {
-               if (e->compute.pfence) {
-                       long timeout = dma_fence_wait(e->compute.pfence, false);
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+               if (q->compute.pfence) {
+                       long timeout = dma_fence_wait(q->compute.pfence, false);
 
                        if (timeout < 0)
                                return -ETIME;
-                       dma_fence_put(e->compute.pfence);
-                       e->compute.pfence = NULL;
+                       dma_fence_put(q->compute.pfence);
+                       q->compute.pfence = NULL;
                }
        }
 
@@ -232,11 +232,11 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 
 static bool xe_vm_is_idle(struct xe_vm *vm)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        xe_vm_assert_held(vm);
-       list_for_each_entry(e, &vm->preempt.engines, compute.link) {
-               if (!xe_engine_is_idle(e))
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+               if (!xe_exec_queue_is_idle(q))
                        return false;
        }
 
@@ -246,36 +246,36 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
 {
        struct list_head *link;
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
                struct dma_fence *fence;
 
                link = list->next;
                XE_WARN_ON(link == list);
 
                fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
-                                            e, e->compute.context,
-                                            ++e->compute.seqno);
-               dma_fence_put(e->compute.pfence);
-               e->compute.pfence = fence;
+                                            q, q->compute.context,
+                                            ++q->compute.seqno);
+               dma_fence_put(q->compute.pfence);
+               q->compute.pfence = fence;
        }
 }
 
 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
        struct ww_acquire_ctx ww;
        int err;
 
-       err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
+       err = xe_bo_lock(bo, &ww, vm->preempt.num_exec_queues, true);
        if (err)
                return err;
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link)
-               if (e->compute.pfence) {
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+               if (q->compute.pfence) {
                        dma_resv_add_fence(bo->ttm.base.resv,
-                                          e->compute.pfence,
+                                          q->compute.pfence,
                                           DMA_RESV_USAGE_BOOKKEEP);
                }
 
@@ -304,22 +304,22 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
 
 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
 {
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        lockdep_assert_held(&vm->lock);
        xe_vm_assert_held(vm);
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link) {
-               e->ops->resume(e);
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+               q->ops->resume(q);
 
-               dma_resv_add_fence(xe_vm_resv(vm), e->compute.pfence,
+               dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
                                   DMA_RESV_USAGE_BOOKKEEP);
-               xe_vm_fence_all_extobjs(vm, e->compute.pfence,
+               xe_vm_fence_all_extobjs(vm, q->compute.pfence,
                                        DMA_RESV_USAGE_BOOKKEEP);
        }
 }
 
-int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
+int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 {
        struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
        struct ttm_validate_buffer *tv;
@@ -337,16 +337,16 @@ int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
        if (err)
                goto out_unlock_outer;
 
-       pfence = xe_preempt_fence_create(e, e->compute.context,
-                                        ++e->compute.seqno);
+       pfence = xe_preempt_fence_create(q, q->compute.context,
+                                        ++q->compute.seqno);
        if (!pfence) {
                err = -ENOMEM;
                goto out_unlock;
        }
 
-       list_add(&e->compute.link, &vm->preempt.engines);
-       ++vm->preempt.num_engines;
-       e->compute.pfence = pfence;
+       list_add(&q->compute.link, &vm->preempt.exec_queues);
+       ++vm->preempt.num_exec_queues;
+       q->compute.pfence = pfence;
 
        down_read(&vm->userptr.notifier_lock);
 
@@ -518,7 +518,7 @@ void xe_vm_unlock_dma_resv(struct xe_vm *vm,
 static void xe_vm_kill(struct xe_vm *vm)
 {
        struct ww_acquire_ctx ww;
-       struct xe_engine *e;
+       struct xe_exec_queue *q;
 
        lockdep_assert_held(&vm->lock);
 
@@ -526,8 +526,8 @@ static void xe_vm_kill(struct xe_vm *vm)
        vm->flags |= XE_VM_FLAG_BANNED;
        trace_xe_vm_kill(vm);
 
-       list_for_each_entry(e, &vm->preempt.engines, compute.link)
-               e->ops->kill(e);
+       list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+               q->ops->kill(q);
        xe_vm_unlock(vm, &ww);
 
        /* TODO: Inform user the VM is banned */
@@ -584,7 +584,7 @@ retry:
        }
 
        err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
-                                 false, vm->preempt.num_engines);
+                                 false, vm->preempt.num_exec_queues);
        if (err)
                goto out_unlock_outer;
 
@@ -833,7 +833,7 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
 }
 
 static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
               struct xe_sync_entry *syncs, u32 num_syncs,
               bool first_op, bool last_op);
 
@@ -1241,7 +1241,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 
        INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
 
-       INIT_LIST_HEAD(&vm->preempt.engines);
+       INIT_LIST_HEAD(&vm->preempt.exec_queues);
        vm->preempt.min_run_period_ms = 10;     /* FIXME: Wire up to uAPI */
 
        for_each_tile(tile, xe, id)
@@ -1320,21 +1320,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
                for_each_tile(tile, xe, id) {
                        struct xe_gt *gt = tile->primary_gt;
                        struct xe_vm *migrate_vm;
-                       struct xe_engine *eng;
+                       struct xe_exec_queue *q;
 
                        if (!vm->pt_root[id])
                                continue;
 
                        migrate_vm = xe_migrate_get_vm(tile->migrate);
-                       eng = xe_engine_create_class(xe, gt, migrate_vm,
-                                                    XE_ENGINE_CLASS_COPY,
-                                                    ENGINE_FLAG_VM);
+                       q = xe_exec_queue_create_class(xe, gt, migrate_vm,
+                                                      XE_ENGINE_CLASS_COPY,
+                                                      EXEC_QUEUE_FLAG_VM);
                        xe_vm_put(migrate_vm);
-                       if (IS_ERR(eng)) {
-                               err = PTR_ERR(eng);
+                       if (IS_ERR(q)) {
+                               err = PTR_ERR(q);
                                goto err_close;
                        }
-                       vm->eng[id] = eng;
+                       vm->q[id] = q;
                        number_tiles++;
                }
        }
@@ -1422,7 +1422,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
        struct drm_gpuva *gpuva, *next;
        u8 id;
 
-       XE_WARN_ON(vm->preempt.num_engines);
+       XE_WARN_ON(vm->preempt.num_exec_queues);
 
        xe_vm_close(vm);
        flush_async_ops(vm);
@@ -1430,10 +1430,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
                flush_work(&vm->preempt.rebind_work);
 
        for_each_tile(tile, xe, id) {
-               if (vm->eng[id]) {
-                       xe_engine_kill(vm->eng[id]);
-                       xe_engine_put(vm->eng[id]);
-                       vm->eng[id] = NULL;
+               if (vm->q[id]) {
+                       xe_exec_queue_kill(vm->q[id]);
+                       xe_exec_queue_put(vm->q[id]);
+                       vm->q[id] = NULL;
                }
        }
 
@@ -1573,7 +1573,7 @@ u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
 }
 
 static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
+xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
                 struct xe_sync_entry *syncs, u32 num_syncs,
                 bool first_op, bool last_op)
 {
@@ -1600,7 +1600,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
                if (!(vma->tile_present & BIT(id)))
                        goto next;
 
-               fence = __xe_pt_unbind_vma(tile, vma, e, first_op ? syncs : NULL,
+               fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
                                           first_op ? num_syncs : 0);
                if (IS_ERR(fence)) {
                        err = PTR_ERR(fence);
@@ -1611,8 +1611,8 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
                        fences[cur_fence++] = fence;
 
 next:
-               if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
-                       e = list_next_entry(e, multi_gt_list);
+               if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+                       q = list_next_entry(q, multi_gt_list);
        }
 
        if (fences) {
@@ -1648,7 +1648,7 @@ err_fences:
 }
 
 static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
               struct xe_sync_entry *syncs, u32 num_syncs,
               bool first_op, bool last_op)
 {
@@ -1675,7 +1675,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
                if (!(vma->tile_mask & BIT(id)))
                        goto next;
 
-               fence = __xe_pt_bind_vma(tile, vma, e ? e : vm->eng[id],
+               fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
                                         first_op ? syncs : NULL,
                                         first_op ? num_syncs : 0,
                                         vma->tile_present & BIT(id));
@@ -1688,8 +1688,8 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
                        fences[cur_fence++] = fence;
 
 next:
-               if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
-                       e = list_next_entry(e, multi_gt_list);
+               if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+                       q = list_next_entry(q, multi_gt_list);
        }
 
        if (fences) {
@@ -1805,7 +1805,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence)
 }
 
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
-                       struct xe_engine *e, struct xe_sync_entry *syncs,
+                       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
                        u32 num_syncs, struct async_op_fence *afence,
                        bool immediate, bool first_op, bool last_op)
 {
@@ -1814,7 +1814,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
        xe_vm_assert_held(vm);
 
        if (immediate) {
-               fence = xe_vm_bind_vma(vma, e, syncs, num_syncs, first_op,
+               fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
                                       last_op);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
@@ -1836,7 +1836,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
        return 0;
 }
 
-static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
+static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
                      struct xe_bo *bo, struct xe_sync_entry *syncs,
                      u32 num_syncs, struct async_op_fence *afence,
                      bool immediate, bool first_op, bool last_op)
@@ -1852,12 +1852,12 @@ static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
                        return err;
        }
 
-       return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence, immediate,
+       return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
                            first_op, last_op);
 }
 
 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
-                       struct xe_engine *e, struct xe_sync_entry *syncs,
+                       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
                        u32 num_syncs, struct async_op_fence *afence,
                        bool first_op, bool last_op)
 {
@@ -1866,7 +1866,7 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
        xe_vm_assert_held(vm);
        xe_bo_assert_held(xe_vma_bo(vma));
 
-       fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs, first_op, last_op);
+       fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
        if (IS_ERR(fence))
                return PTR_ERR(fence);
        if (afence)
@@ -2074,7 +2074,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
        vm = xa_load(&xef->vm.xa, args->vm_id);
        if (XE_IOCTL_DBG(xe, !vm))
                err = -ENOENT;
-       else if (XE_IOCTL_DBG(xe, vm->preempt.num_engines))
+       else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
                err = -EBUSY;
        else
                xa_erase(&xef->vm.xa, args->vm_id);
@@ -2093,7 +2093,7 @@ static const u32 region_to_mem_type[] = {
 };
 
 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
-                         struct xe_engine *e, u32 region,
+                         struct xe_exec_queue *q, u32 region,
                          struct xe_sync_entry *syncs, u32 num_syncs,
                          struct async_op_fence *afence, bool first_op,
                          bool last_op)
@@ -2109,7 +2109,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
        }
 
        if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
-               return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
+               return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
                                  afence, true, first_op, last_op);
        } else {
                int i;
@@ -2414,7 +2414,7 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
  * Parse operations list and create any resources needed for the operations
  * prior to fully committing to the operations. This setup can fail.
  */
-static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                   struct drm_gpuva_ops **ops, int num_ops_list,
                                   struct xe_sync_entry *syncs, u32 num_syncs,
                                   struct list_head *ops_list, bool async)
@@ -2434,9 +2434,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
                if (!fence)
                        return -ENOMEM;
 
-               seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
+               seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
                dma_fence_init(&fence->fence, &async_op_fence_ops,
-                              &vm->async_ops.lock, e ? e->bind.fence_ctx :
+                              &vm->async_ops.lock, q ? q->bind.fence_ctx :
                               vm->async_ops.fence.context, seqno);
 
                if (!xe_vm_no_dma_fences(vm)) {
@@ -2467,7 +2467,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
                                op->syncs = syncs;
                        }
 
-                       op->engine = e;
+                       op->q = q;
 
                        switch (op->base.op) {
                        case DRM_GPUVA_OP_MAP:
@@ -2677,7 +2677,7 @@ again:
 
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
-               err = xe_vm_bind(vm, vma, op->engine, xe_vma_bo(vma),
+               err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
                                 op->syncs, op->num_syncs, op->fence,
                                 op->map.immediate || !xe_vm_in_fault_mode(vm),
                                 op->flags & XE_VMA_OP_FIRST,
@@ -2693,7 +2693,7 @@ again:
                                vm->async_ops.munmap_rebind_inflight = true;
                                vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
                        }
-                       err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
+                       err = xe_vm_unbind(vm, vma, op->q, op->syncs,
                                           op->num_syncs,
                                           !prev && !next ? op->fence : NULL,
                                           op->flags & XE_VMA_OP_FIRST,
@@ -2706,7 +2706,7 @@ again:
 
                if (prev) {
                        op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
-                       err = xe_vm_bind(vm, op->remap.prev, op->engine,
+                       err = xe_vm_bind(vm, op->remap.prev, op->q,
                                         xe_vma_bo(op->remap.prev), op->syncs,
                                         op->num_syncs,
                                         !next ? op->fence : NULL, true, false,
@@ -2719,7 +2719,7 @@ again:
 
                if (next) {
                        op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
-                       err = xe_vm_bind(vm, op->remap.next, op->engine,
+                       err = xe_vm_bind(vm, op->remap.next, op->q,
                                         xe_vma_bo(op->remap.next),
                                         op->syncs, op->num_syncs,
                                         op->fence, true, false,
@@ -2734,13 +2734,13 @@ again:
                break;
        }
        case DRM_GPUVA_OP_UNMAP:
-               err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
+               err = xe_vm_unbind(vm, vma, op->q, op->syncs,
                                   op->num_syncs, op->fence,
                                   op->flags & XE_VMA_OP_FIRST,
                                   op->flags & XE_VMA_OP_LAST);
                break;
        case DRM_GPUVA_OP_PREFETCH:
-               err = xe_vm_prefetch(vm, vma, op->engine, op->prefetch.region,
+               err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
                                     op->syncs, op->num_syncs, op->fence,
                                     op->flags & XE_VMA_OP_FIRST,
                                     op->flags & XE_VMA_OP_LAST);
@@ -2819,8 +2819,8 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
                while (op->num_syncs--)
                        xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
                kfree(op->syncs);
-               if (op->engine)
-                       xe_engine_put(op->engine);
+               if (op->q)
+                       xe_exec_queue_put(op->q);
                if (op->fence)
                        dma_fence_put(&op->fence->fence);
        }
@@ -3174,7 +3174,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct xe_bo **bos = NULL;
        struct drm_gpuva_ops **ops = NULL;
        struct xe_vm *vm;
-       struct xe_engine *e = NULL;
+       struct xe_exec_queue *q = NULL;
        u32 num_syncs;
        struct xe_sync_entry *syncs = NULL;
        struct drm_xe_vm_bind_op *bind_ops;
@@ -3187,23 +3187,23 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (err)
                return err;
 
-       if (args->engine_id) {
-               e = xe_engine_lookup(xef, args->engine_id);
-               if (XE_IOCTL_DBG(xe, !e)) {
+       if (args->exec_queue_id) {
+               q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+               if (XE_IOCTL_DBG(xe, !q)) {
                        err = -ENOENT;
                        goto free_objs;
                }
 
-               if (XE_IOCTL_DBG(xe, !(e->flags & ENGINE_FLAG_VM))) {
+               if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
                        err = -EINVAL;
-                       goto put_engine;
+                       goto put_exec_queue;
                }
        }
 
        vm = xe_vm_lookup(xef, args->vm_id);
        if (XE_IOCTL_DBG(xe, !vm)) {
                err = -EINVAL;
-               goto put_engine;
+               goto put_exec_queue;
        }
 
        err = down_write_killable(&vm->lock);
@@ -3357,7 +3357,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                }
        }
 
-       err = vm_bind_ioctl_ops_parse(vm, e, ops, args->num_binds,
+       err = vm_bind_ioctl_ops_parse(vm, q, ops, args->num_binds,
                                      syncs, num_syncs, &ops_list, async);
        if (err)
                goto unwind_ops;
@@ -3391,9 +3391,9 @@ release_vm_lock:
        up_write(&vm->lock);
 put_vm:
        xe_vm_put(vm);
-put_engine:
-       if (e)
-               xe_engine_put(e);
+put_exec_queue:
+       if (q)
+               xe_exec_queue_put(q);
 free_objs:
        kfree(bos);
        kfree(ops);
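Every walk over the old vm->preempt.engines list now iterates vm->preempt.exec_queues through the compute.link hook, as in this illustrative helper (count_pending_pfences is a sketch, not part of the patch):

static int count_pending_pfences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;
	int count = 0;

	xe_vm_assert_held(vm);

	/* Queues whose preempt fence has not been consumed yet. */
	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		if (q->compute.pfence)
			count++;

	return count;
}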
index a1d30de37d2037e06b6515078e4707e0abfbdb1a..805236578140cf2a590a73c81f1a36344e0d5d38 100644 (file)
@@ -18,7 +18,7 @@ struct drm_file;
 struct ttm_buffer_object;
 struct ttm_validate_buffer;
 
-struct xe_engine;
+struct xe_exec_queue;
 struct xe_file;
 struct xe_sync_entry;
 
@@ -164,7 +164,7 @@ static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
        return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
 }
 
-int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e);
+int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
 
 int xe_vm_userptr_pin(struct xe_vm *vm);
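A caller attaching a long-running queue to a compute-mode VM now goes through the renamed entry point. A minimal sketch, assuming a hypothetical wrapper (attach_compute_queue is not part of the patch):

static int attach_compute_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
	/* Only compute-mode VMs track a preempt fence per queue. */
	if (!xe_vm_in_compute_mode(vm))
		return -EINVAL;

	return xe_vm_add_compute_exec_queue(vm, q);
}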
 
index f7522f9ca40ee1e0ad630942ccf8765d6601d7ab..f8675c3da3b1440873bf53e74c30ece044e5bc57 100644 (file)
@@ -138,8 +138,8 @@ struct xe_vm {
 
        struct xe_device *xe;
 
-       /* engine used for (un)binding vma's */
-       struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
+       /* exec queue used for (un)binding vma's */
+       struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
 
        /** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
        struct ttm_lru_bulk_move lru_bulk_move;
@@ -278,10 +278,10 @@ struct xe_vm {
                 * an engine again
                 */
                s64 min_run_period_ms;
-               /** @engines: list of engines attached to this VM */
-               struct list_head engines;
-               /** @num_engines: number user engines attached to this VM */
-               int num_engines;
+               /** @exec_queues: list of exec queues attached to this VM */
+               struct list_head exec_queues;
+               /** @num_exec_queues: number of exec queues attached to this VM */
+               int num_exec_queues;
                /**
                 * @rebind_deactivated: Whether rebind has been temporarily deactivated
                 * due to no work available. Protected by the vm resv.
@@ -386,8 +386,8 @@ struct xe_vma_op {
         * operations is processed
         */
        struct drm_gpuva_ops *ops;
-       /** @engine: engine for this operation */
-       struct xe_engine *engine;
+       /** @q: exec queue for this operation */
+       struct xe_exec_queue *q;
        /**
         * @syncs: syncs for this operation, only used on first and last
         * operation
index 3d09e9e9267b5997b5e0fa05a8b09a0153ef664f..86f16d50e9ccc1144059c7c485985a9ccd06d009 100644 (file)
@@ -103,14 +103,14 @@ struct xe_user_extension {
 #define DRM_XE_VM_CREATE               0x03
 #define DRM_XE_VM_DESTROY              0x04
 #define DRM_XE_VM_BIND                 0x05
-#define DRM_XE_ENGINE_CREATE           0x06
-#define DRM_XE_ENGINE_DESTROY          0x07
+#define DRM_XE_EXEC_QUEUE_CREATE               0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY              0x07
 #define DRM_XE_EXEC                    0x08
 #define DRM_XE_MMIO                    0x09
-#define DRM_XE_ENGINE_SET_PROPERTY     0x0a
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0a
 #define DRM_XE_WAIT_USER_FENCE         0x0b
 #define DRM_XE_VM_MADVISE              0x0c
-#define DRM_XE_ENGINE_GET_PROPERTY     0x0d
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0d
 
 /* Must be kept compact -- no holes */
 #define DRM_IOCTL_XE_DEVICE_QUERY              DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
@@ -119,12 +119,12 @@ struct xe_user_extension {
 #define DRM_IOCTL_XE_VM_CREATE                 DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
 #define DRM_IOCTL_XE_VM_DESTROY                         DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
 #define DRM_IOCTL_XE_VM_BIND                    DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
-#define DRM_IOCTL_XE_ENGINE_CREATE             DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
-#define DRM_IOCTL_XE_ENGINE_GET_PROPERTY       DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_GET_PROPERTY, struct drm_xe_engine_get_property)
-#define DRM_IOCTL_XE_ENGINE_DESTROY             DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE         DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY   DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY                 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
 #define DRM_IOCTL_XE_EXEC                       DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
 #define DRM_IOCTL_XE_MMIO                      DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
-#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY        DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
+#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY    DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
 #define DRM_IOCTL_XE_WAIT_USER_FENCE           DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
 #define DRM_IOCTL_XE_VM_MADVISE                         DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)
 
@@ -649,11 +649,11 @@ struct drm_xe_vm_bind {
        __u32 vm_id;
 
        /**
-        * @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
-        * and engine must have same vm_id. If zero, the default VM bind engine
+        * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
+        * and exec queue must have same vm_id. If zero, the default VM bind engine
         * is used.
         */
-       __u32 engine_id;
+       __u32 exec_queue_id;
 
        /** @num_binds: number of binds in this IOCTL */
        __u32 num_binds;
@@ -685,8 +685,8 @@ struct drm_xe_vm_bind {
        __u64 reserved[2];
 };
 
-/** struct drm_xe_ext_engine_set_property - engine set property extension */
-struct drm_xe_ext_engine_set_property {
+/** struct drm_xe_ext_exec_queue_set_property - exec queue set property extension */
+struct drm_xe_ext_exec_queue_set_property {
        /** @base: base user extension */
        struct xe_user_extension base;
 
@@ -701,32 +701,32 @@ struct drm_xe_ext_engine_set_property {
 };
 
 /**
- * struct drm_xe_engine_set_property - engine set property
+ * struct drm_xe_exec_queue_set_property - exec queue set property
  *
- * Same namespace for extensions as drm_xe_engine_create
+ * Same namespace for extensions as drm_xe_exec_queue_create
  */
-struct drm_xe_engine_set_property {
+struct drm_xe_exec_queue_set_property {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-       /** @engine_id: Engine ID */
-       __u32 engine_id;
+       /** @exec_queue_id: Exec queue ID */
+       __u32 exec_queue_id;
 
-#define XE_ENGINE_SET_PROPERTY_PRIORITY                        0
-#define XE_ENGINE_SET_PROPERTY_TIMESLICE               1
-#define XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT      2
+#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY                    0
+#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE           1
+#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT  2
        /*
         * Long running or ULLS engine mode. DMA fences not allowed in this
         * mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE, serves
         * as a sanity check the UMD knows what it is doing. Can only be set at
         * engine create time.
         */
-#define XE_ENGINE_SET_PROPERTY_COMPUTE_MODE            3
-#define XE_ENGINE_SET_PROPERTY_PERSISTENCE             4
-#define XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT             5
-#define XE_ENGINE_SET_PROPERTY_ACC_TRIGGER             6
-#define XE_ENGINE_SET_PROPERTY_ACC_NOTIFY              7
-#define XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY         8
+#define XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE                3
+#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE         4
+#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT         5
+#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER         6
+#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY          7
+#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY             8
        /** @property: property to set */
        __u32 property;
 
@@ -755,25 +755,25 @@ struct drm_xe_engine_class_instance {
        __u16 gt_id;
 };
 
-struct drm_xe_engine_create {
-#define XE_ENGINE_EXTENSION_SET_PROPERTY               0
+struct drm_xe_exec_queue_create {
+#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-       /** @width: submission width (number BB per exec) for this engine */
+       /** @width: submission width (number BB per exec) for this exec queue */
        __u16 width;
 
-       /** @num_placements: number of valid placements for this engine */
+       /** @num_placements: number of valid placements for this exec queue */
        __u16 num_placements;
 
-       /** @vm_id: VM to use for this engine */
+       /** @vm_id: VM to use for this exec queue */
        __u32 vm_id;
 
        /** @flags: MBZ */
        __u32 flags;
 
-       /** @engine_id: Returned engine ID */
-       __u32 engine_id;
+       /** @exec_queue_id: Returned exec queue ID */
+       __u32 exec_queue_id;
 
        /**
         * @instances: user pointer to a 2-d array of struct
@@ -788,14 +788,14 @@ struct drm_xe_engine_create {
        __u64 reserved[2];
 };
 
-struct drm_xe_engine_get_property {
+struct drm_xe_exec_queue_get_property {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-       /** @engine_id: Engine ID */
-       __u32 engine_id;
+       /** @exec_queue_id: Exec queue ID */
+       __u32 exec_queue_id;
 
-#define XE_ENGINE_GET_PROPERTY_BAN                     0
+#define XE_EXEC_QUEUE_GET_PROPERTY_BAN                 0
        /** @property: property to get */
        __u32 property;
 
@@ -806,9 +806,9 @@ struct drm_xe_engine_get_property {
        __u64 reserved[2];
 };
 
-struct drm_xe_engine_destroy {
-       /** @engine_id: Engine ID */
-       __u32 engine_id;
+struct drm_xe_exec_queue_destroy {
+       /** @exec_queue_id: Exec queue ID */
+       __u32 exec_queue_id;
 
        /** @pad: MBZ */
        __u32 pad;
@@ -855,8 +855,8 @@ struct drm_xe_exec {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-       /** @engine_id: Engine ID for the batch buffer */
-       __u32 engine_id;
+       /** @exec_queue_id: Exec queue ID for the batch buffer */
+       __u32 exec_queue_id;
 
        /** @num_syncs: Amount of struct drm_xe_sync in array. */
        __u32 num_syncs;
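From userspace, the renamed ioctls and structs are exercised as below. This is a sketch only: it assumes an already-open xe DRM fd and an existing vm_id, that the header installs as <drm/xe_drm.h>, and that engine_class is a member of struct drm_xe_engine_class_instance (that field sits outside the hunk above); error handling is trimmed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Create a VM-bind exec queue usable as drm_xe_vm_bind.exec_queue_id. */
static int bind_queue_create(int fd, uint32_t vm_id, uint32_t *exec_queue_id)
{
	struct drm_xe_engine_class_instance instance = {
		.engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,
		.num_placements = 1,
		.vm_id = vm_id,
		.instances = (uintptr_t)&instance,
	};
	int ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);

	if (ret)
		return ret;

	*exec_queue_id = create.exec_queue_id;
	return 0;
}

static int bind_queue_destroy(int fd, uint32_t exec_queue_id)
{
	struct drm_xe_exec_queue_destroy destroy = {
		.exec_queue_id = exec_queue_id,
	};

	return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
}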