drm/xe: Do not allow CPU address mirror VMA unbind if the GPU has bindings
authorMatthew Brost <matthew.brost@intel.com>
Thu, 6 Mar 2025 01:26:42 +0000 (17:26 -0800)
committerMatthew Brost <matthew.brost@intel.com>
Thu, 6 Mar 2025 19:35:49 +0000 (11:35 -0800)
uAPI is designed with the use case that only mapping a BO to a malloc'd
address will unbind a CPU-address mirror VMA. Therefore, allowing a
CPU-address mirror VMA to unbind when the GPU has bindings in the range
being unbound does not make much sense. This behavior is not supported,
as it simplifies the code. This decision can always be revisited if a
use case arises.

v3:
 - s/arrises/arises (Thomas)
 - s/system allocator/GPU address mirror (Thomas)
 - Kernel doc (Thomas)
 - Newline between function defs (Thomas)
v5:
 - Kernel doc (Thomas)
v6:
 - Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306012657.3505757-18-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_svm.h
drivers/gpu/drm/xe/xe_vm.c

index a9d32cd69ae94a7f135c0cb22cb00eee2deff8e4..80076f4dc4b43894a3c89127c89c3aa0947e5af5 100644 (file)
@@ -434,3 +434,18 @@ err_out:
 
        return err;
 }
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address.
+ * @end: End address.
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if address range has a SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+       return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
index 87cbda5641bbc4b535a7898486c35d972b92fec5..35e044e492e040ce8e695a47984dedcaed5469dc 100644 (file)
@@ -57,6 +57,8 @@ void xe_svm_close(struct xe_vm *vm);
 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            struct xe_tile *tile, u64 fault_addr,
                            bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
 #else
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
@@ -86,6 +88,12 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 {
        return 0;
 }
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+       return false;
+}
 #endif
 
 /**
index d0ed77c80f03b5afa57eedc550e9fa86ea9109e3..ea56b8379634e889a86486b1d0264a7cc51dd157 100644 (file)
@@ -2486,6 +2486,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                        struct xe_vma *old =
                                gpuva_to_vma(op->base.remap.unmap->va);
                        bool skip = xe_vma_is_cpu_addr_mirror(old);
+                       u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+                       if (op->base.remap.prev)
+                               start = op->base.remap.prev->va.addr +
+                                       op->base.remap.prev->va.range;
+                       if (op->base.remap.next)
+                               end = op->base.remap.next->va.addr;
+
+                       if (xe_vma_is_cpu_addr_mirror(old) &&
+                           xe_svm_has_mapping(vm, start, end))
+                               return -EBUSY;
 
                        op->remap.start = xe_vma_start(old);
                        op->remap.range = xe_vma_size(old);
@@ -2567,6 +2578,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                case DRM_GPUVA_OP_UNMAP:
                        vma = gpuva_to_vma(op->base.unmap.va);
 
+                       if (xe_vma_is_cpu_addr_mirror(vma) &&
+                           xe_svm_has_mapping(vm, xe_vma_start(vma),
+                                              xe_vma_end(vma)))
+                               return -EBUSY;
+
                        if (!xe_vma_is_cpu_addr_mirror(vma))
                                xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
                        break;