drm/xe: Annotate each dumpable vma as such
Author: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Wed, 21 Feb 2024 13:30:19 +0000 (14:30 +0100)
Committer: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Wed, 21 Feb 2024 19:08:22 +0000 (20:08 +0100)
In preparation for snapshot dumping, mark each dumpable VMA as such, so
we can walk over the VM later and dump it.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240221133024.898315-4-maarten.lankhorst@linux.intel.com
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h

index 08462f80f000efb0c1dc12be12b0a80895a6c82f..df1e3841005d48a6c76e28b3f5bfc10e5728c266 100644 (file)
@@ -792,6 +792,7 @@ static void xe_vma_free(struct xe_vma *vma)
 
 #define VMA_CREATE_FLAG_READ_ONLY      BIT(0)
 #define VMA_CREATE_FLAG_IS_NULL                BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE       BIT(2)
 
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
@@ -804,6 +805,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        u8 id;
        bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
        bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+       bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
 
        xe_assert(vm->xe, start < end);
        xe_assert(vm->xe, end < vm->size);
@@ -838,6 +840,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        vma->gpuva.va.range = end - start + 1;
        if (read_only)
                vma->gpuva.flags |= XE_VMA_READ_ONLY;
+       if (dumpable)
+               vma->gpuva.flags |= XE_VMA_DUMPABLE;
 
        for_each_tile(tile, vm->xe, id)
                vma->tile_mask |= 0x1 << id;
@@ -2122,6 +2126,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                        op->map.read_only =
                                flags & DRM_XE_VM_BIND_FLAG_READONLY;
                        op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+                       op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
                        op->map.pat_index = pat_index;
                } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
                        op->prefetch.region = prefetch_region;
@@ -2317,6 +2322,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                VMA_CREATE_FLAG_READ_ONLY : 0;
                        flags |= op->map.is_null ?
                                VMA_CREATE_FLAG_IS_NULL : 0;
+                       flags |= op->map.dumpable ?
+                               VMA_CREATE_FLAG_DUMPABLE : 0;
 
                        vma = new_vma(vm, &op->base.map, op->map.pat_index,
                                      flags);
@@ -2341,6 +2348,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                flags |= op->base.remap.unmap->va->flags &
                                        DRM_GPUVA_SPARSE ?
                                        VMA_CREATE_FLAG_IS_NULL : 0;
+                               flags |= op->base.remap.unmap->va->flags &
+                                       XE_VMA_DUMPABLE ?
+                                       VMA_CREATE_FLAG_DUMPABLE : 0;
 
                                vma = new_vma(vm, op->base.remap.prev,
                                              old->pat_index, flags);
@@ -2372,6 +2382,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
                                flags |= op->base.remap.unmap->va->flags &
                                        DRM_GPUVA_SPARSE ?
                                        VMA_CREATE_FLAG_IS_NULL : 0;
+                               flags |= op->base.remap.unmap->va->flags &
+                                       XE_VMA_DUMPABLE ?
+                                       VMA_CREATE_FLAG_DUMPABLE : 0;
 
                                vma = new_vma(vm, op->base.remap.next,
                                              old->pat_index, flags);
index a603cc2eb56b3f574f3e76612c10bb6e42fbedb9..a975ac83eccae45a403297f9ae88bef34a3c5957 100644 (file)
@@ -31,6 +31,7 @@ struct xe_vm;
 #define XE_VMA_PTE_1G          (DRM_GPUVA_USERBITS << 7)
 #define XE_VMA_PTE_64K         (DRM_GPUVA_USERBITS << 8)
 #define XE_VMA_PTE_COMPACT     (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_DUMPABLE                (DRM_GPUVA_USERBITS << 10)
 
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
@@ -294,6 +295,8 @@ struct xe_vma_op_map {
        bool read_only;
        /** @is_null: is NULL binding */
        bool is_null;
+       /** @dumpable: whether BO is dumped on GPU hang */
+       bool dumpable;
        /** @pat_index: The pat index to use for this operation. */
        u16 pat_index;
 };