drm/amdgpu: recreate fence from user seq
Author: Christian König <christian.koenig@amd.com>
Fri, 19 Jun 2015 15:00:19 +0000 (17:00 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 29 Jun 2015 19:52:49 +0000 (15:52 -0400)
And use common fence infrastructure for the wait.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

index 963c4ba5dcbaf98c65c578463fb64834756eb58b..01657830b470a49e8209fd39fa829d4a1fbb3610 100644 (file)
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   unsigned irq_type);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+                         uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 int amdgpu_fence_wait_any(struct amdgpu_device *adev,
                          struct amdgpu_fence **fences,
                          bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-                                  u64 *target_seq, bool intr,
-                                  long timeout);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
index 86b78c7991762b594387aa6c415c9d3ccdefea7a..84ba1d1bf3270e20e810d8d290e9764a028a0257 100644 (file)
@@ -739,9 +739,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
-       uint64_t seq[AMDGPU_MAX_RINGS] = {0};
-       struct amdgpu_ring *ring = NULL;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+       struct amdgpu_fence *fence = NULL;
+       struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
        long r;
 
@@ -754,9 +754,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        if (r)
                return r;
 
-       seq[ring->idx] = wait->in.handle;
+       r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+       if (r)
+               return r;
 
-       r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+       r = fence_wait_timeout(&fence->base, true, timeout);
+       amdgpu_fence_unref(&fence);
        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;
index f2d885c1da8fdf523396d625d7253f003ae14444..a7189a1fa6a17dc308075535c6ddc0fcf3403270 100644 (file)
@@ -135,6 +135,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
        return 0;
 }
 
+/**
+ * amdgpu_fence_recreate - recreate a fence from a user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence command from the user fence sequence number (all asics).
+ * Returns 0 on success, -EINVAL if seq is not yet emitted on the ring,
+ * or -ENOMEM if the fence allocation fails.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+                         uint64_t seq, struct amdgpu_fence **fence)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (seq > ring->fence_drv.sync_seq[ring->idx])
+               return -EINVAL;
+
+       *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+       if ((*fence) == NULL)
+               return -ENOMEM;
+
+       (*fence)->seq = seq;
+       (*fence)->ring = ring;
+       (*fence)->owner = owner;
+       fence_init(&(*fence)->base, &amdgpu_fence_ops,
+               &adev->fence_queue.lock, adev->fence_context + ring->idx,
+               (*fence)->seq);
+       return 0;
+}
+
 /**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
@@ -517,8 +549,9 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
  * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
-                                  bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+                                         u64 *target_seq, bool intr,
+                                         long timeout)
 {
        uint64_t last_seq[AMDGPU_MAX_RINGS];
        bool signaled;