drm/xe/bo: consider dma-resv fences for clear job
author Matthew Auld <matthew.auld@intel.com>
Wed, 25 Oct 2023 17:39:40 +0000 (18:39 +0100)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:43:31 +0000 (11:43 -0500)
There could be active fences already in the dma-resv for the object
prior to clearing. Make sure to input them as dependencies for the clear
job.

v2 (Matt B):
  - We can use USAGE_KERNEL here, since it's only the move fences we
    care about. Also add a comment.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_migrate.c

index 67b71244b1f2ec629aa5e0afcf20c67f0d689df8..53b5b36aca66416bdbf1f3b8f94ac6e5891e9a80 100644 (file)
@@ -980,8 +980,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 
                size -= clear_L0;
 
-               /* TODO: Add dependencies here */
-
                /* Preemption is enabled again by the ring ops. */
                if (!clear_vram) {
                        emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
@@ -1010,6 +1008,18 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                }
 
                xe_sched_job_add_migrate_flush(job, flush_flags);
+               if (!fence) {
+                       /*
+                        * There can't be anything userspace related at this
+                        * point, so we just need to respect any potential move
+                        * fences, which are always tracked as
+                        * DMA_RESV_USAGE_KERNEL.
+                        */
+                       err = job_add_deps(job, bo->ttm.base.resv,
+                                          DMA_RESV_USAGE_KERNEL);
+                       if (err)
+                               goto err_job;
+               }
 
                xe_sched_job_arm(job);
                dma_fence_put(fence);
@@ -1024,6 +1034,8 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                xe_bb_free(bb, fence);
                continue;
 
+err_job:
+               xe_sched_job_put(job);
 err:
                mutex_unlock(&m->job_mutex);
                xe_bb_free(bb, NULL);