drm/xe/exec_queue: Prepare last fence for hw engine group resume context
author		Francois Dugast <francois.dugast@intel.com>
		Fri, 9 Aug 2024 15:51:31 +0000 (17:51 +0200)
committer	Matthew Brost <matthew.brost@intel.com>
		Sun, 18 Aug 2024 01:31:54 +0000 (18:31 -0700)
Ensure we can safely take a reference of the exec queue's last fence
from the context of resuming jobs from the hw engine group. The locking
requirements differ from the general case, hence the introduction of
the new function xe_exec_queue_last_fence_get_for_resume().
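
For context, a minimal sketch of the kind of caller this is meant for,
on the resume path of the hw engine group. The group structure and
fields used below (xe_hw_engine_group, mode_sem, exec_queue_list,
hw_engine_group_link) and the xe_vm_in_lr_mode() check come from other
patches in this series; the function itself is illustrative and not
part of this patch:

	/* Illustrative sketch: wait on the last fence of each dma-fence
	 * exec queue in the group before resuming its long-running jobs.
	 * Holding the group semaphore in write mode is what makes the new
	 * helper safe to call here.
	 */
	static int wait_for_dma_fence_jobs_sketch(struct xe_hw_engine_group *group)
	{
		struct xe_exec_queue *q;
		struct dma_fence *fence;
		long ret;

		lockdep_assert_held_write(&group->mode_sem);

		list_for_each_entry(q, &group->exec_queue_list,
				    hw_engine_group_link) {
			/* Long-running queues have no last fence to wait on */
			if (xe_vm_in_lr_mode(q->vm))
				continue;

			fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
			ret = dma_fence_wait(fence, false);
			dma_fence_put(fence);

			if (ret < 0)
				return ret;
		}

		return 0;
	}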

v2: Add kernel doc, rework the code to prevent code duplication

v3: Fix kernel doc, remove now unnecessary lockdep variants (Matt Brost)

v4: Remove new put function (Matt Brost)

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240809155156.1955925-7-francois.dugast@intel.com
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h

index 3ce4582504f9e1e0e3878ce36a883717367ad824..6d3b44cbc4c7102f3bab5dfae80203b428c0be0a 100644 (file)
@@ -837,10 +837,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
                                                    struct xe_vm *vm)
 {
-       if (q->flags & EXEC_QUEUE_FLAG_VM)
+       if (q->flags & EXEC_QUEUE_FLAG_VM) {
                lockdep_assert_held(&vm->lock);
-       else
+       } else {
                xe_vm_assert_held(vm);
+               lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
+       }
 }
 
 /**
@@ -894,6 +896,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
        return fence;
 }
 
+/**
+ * xe_exec_queue_last_fence_get_for_resume() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref. Only safe to be called in the context of
+ * resuming the hw engine group's long-running exec queue, when the group
+ * semaphore is held.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
+                                                         struct xe_vm *vm)
+{
+       struct dma_fence *fence;
+
+       lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+       if (q->last_fence &&
+           test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+               xe_exec_queue_last_fence_put_unlocked(q);
+
+       fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+       dma_fence_get(fence);
+       return fence;
+}
+
 /**
  * xe_exec_queue_last_fence_set() - Set last fence
  * @q: The exec queue
index 99139368ba6e72e18f546ca19f7916cea1050285..90c7f73eab88430942199e150e716841113f76fa 100644 (file)
@@ -77,6 +77,8 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
                                               struct xe_vm *vm);
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
+                                                         struct xe_vm *vm);
 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
                                  struct dma_fence *fence);
 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
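
A design note on the v4 change above ("Remove new put function"): no
dedicated put helper is added, so the reference taken by the new getter
is presumably dropped with the generic dma_fence_put(), along these
lines (illustrative only, q and vm assumed in scope):

	struct dma_fence *fence;

	fence = xe_exec_queue_last_fence_get_for_resume(q, vm);
	/* ... wait on or chain the fence ... */
	dma_fence_put(fence);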