drm/xe: Only allow 1 ufence per exec / bind IOCTL
author    Matthew Brost <matthew.brost@intel.com>
          Wed, 24 Jan 2024 23:44:13 +0000 (15:44 -0800)
committer Thomas Hellström <thomas.hellstrom@linux.intel.com>
          Thu, 1 Feb 2024 10:26:15 +0000 (11:26 +0100)
The way exec ufences are coded, only 1 ufence per IOCTL will be signaled.
It is possible to fix this, but for current use cases 1 ufence per IOCTL
is sufficient. Enforce a limit of 1 ufence per IOCTL (on both exec and
bind, to be uniform).
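
As an illustration (not part of this patch), a minimal userspace sketch
of the new limit, assuming the xe uapi names from drm/xe_drm.h
(struct drm_xe_sync, struct drm_xe_exec, DRM_XE_SYNC_TYPE_USER_FENCE,
DRM_XE_SYNC_FLAG_SIGNAL, DRM_IOCTL_XE_EXEC); the helper name and its
parameters are hypothetical:

/*
 * Illustrative only: submitting two user-fence syncs in a single exec
 * IOCTL is now rejected with -EINVAL. fd, exec_queue_id, batch_addr
 * and ufence_addr (8-byte-aligned user memory) are assumed to be set
 * up elsewhere.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int exec_with_two_ufences(int fd, uint32_t exec_queue_id,
				 uint64_t batch_addr, uint64_t *ufence_addr)
{
	struct drm_xe_sync syncs[2] = {};
	struct drm_xe_exec exec = {};

	for (int i = 0; i < 2; i++) {
		syncs[i].type = DRM_XE_SYNC_TYPE_USER_FENCE;
		syncs[i].flags = DRM_XE_SYNC_FLAG_SIGNAL;
		syncs[i].addr = (uintptr_t)&ufence_addr[i];
		syncs[i].timeline_value = 1;	/* value written on signal */
	}

	exec.exec_queue_id = exec_queue_id;
	exec.num_syncs = 2;			/* two ufences: num_ufence > 1 */
	exec.syncs = (uintptr_t)syncs;
	exec.address = batch_addr;
	exec.num_batch_buffer = 1;

	/* With this patch the IOCTL fails with errno == EINVAL. */
	return ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
}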

v2:
- Add fixes tag (Thomas)

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240124234413.1640825-1-matthew.brost@intel.com
(cherry picked from commit d1df9bfbf68c65418f30917f406b6d5bd597714e)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_sync.h
drivers/gpu/drm/xe/xe_vm.c

diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b853feed9ccc15eefab7f0ccdf070096521e6015..17f26952e6656b8a077eb51161acbfd96638db2c 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -111,7 +111,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
        struct drm_exec *exec = &vm_exec.exec;
-       u32 i, num_syncs = 0;
+       u32 i, num_syncs = 0, num_ufence = 0;
        struct xe_sched_job *job;
        struct dma_fence *rebind_fence;
        struct xe_vm *vm;
@@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                                           SYNC_PARSE_FLAG_LR_MODE : 0));
                if (err)
                        goto err_syncs;
+
+               if (xe_sync_is_ufence(&syncs[i]))
+                       num_ufence++;
+       }
+
+       if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+               err = -EINVAL;
+               goto err_syncs;
        }
 
        if (xe_exec_queue_is_parallel(q)) {
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index d284afbe917c19203473b30d0abc38ca88ffbfa2..f43cdcaca6c5794ec8b42ab3bc77e1942004d046 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -33,4 +33,9 @@ struct dma_fence *
 xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
                     struct xe_exec_queue *q, struct xe_vm *vm);
 
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+       return !!sync->ufence;
+}
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 53833ab81424ceeca8edd95e15a2c6a34e681f6c..32ae51945439a770fc3b5bb979314eef06f7ff68 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2851,7 +2851,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_gpuva_ops **ops = NULL;
        struct xe_vm *vm;
        struct xe_exec_queue *q = NULL;
-       u32 num_syncs;
+       u32 num_syncs, num_ufence = 0;
        struct xe_sync_entry *syncs = NULL;
        struct drm_xe_vm_bind_op *bind_ops;
        LIST_HEAD(ops_list);
@@ -2988,6 +2988,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                                           SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
                if (err)
                        goto free_syncs;
+
+               if (xe_sync_is_ufence(&syncs[num_syncs]))
+                       num_ufence++;
+       }
+
+       if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+               err = -EINVAL;
+               goto free_syncs;
        }
 
        if (!args->num_binds) {
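
Since at most one ufence per exec/bind IOCTL is signaled, userspace
waits on that single fence address. A minimal sketch, assuming the
DRM_IOCTL_XE_WAIT_USER_FENCE uapi from drm/xe_drm.h
(struct drm_xe_wait_user_fence, DRM_XE_UFENCE_WAIT_OP_EQ,
DRM_XE_UFENCE_WAIT_MASK_U64); the helper is hypothetical:

/*
 * Illustrative only: addr and value mirror what was passed in the
 * single user-fence drm_xe_sync entry of the preceding IOCTL.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int wait_single_ufence(int fd, uint64_t addr, uint64_t value,
			      uint32_t exec_queue_id, int64_t timeout_ns)
{
	struct drm_xe_wait_user_fence wait = {
		.addr = addr,			/* same addr as the sync entry */
		.op = DRM_XE_UFENCE_WAIT_OP_EQ,	/* wait until *addr == value */
		.value = value,
		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
		.timeout = timeout_ns,
		.exec_queue_id = exec_queue_id,
	};

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}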