drm/i915: Assert all signalers we depended on did indeed signal
author     Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 2 Jan 2018 15:12:25 +0000 (15:12 +0000)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 3 Jan 2018 12:05:41 +0000 (12:05 +0000)
Back up our comment that all signalers should have been signaled before
we ourselves were retired with an assert to that effect.
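
To illustrate the invariant the new asserts check, here is a standalone
C sketch (not the kernel code; the struct and function names below are
invented for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	bool completed;
	struct node *signalers[4];	/* everyone this node waited on */
	int nr_signalers;
};

/* Teardown-time check mirroring the new GEM_BUG_ON(): every signaler
 * must already have completed by the time we are retired. */
static void node_fini(const struct node *n)
{
	for (int i = 0; i < n->nr_signalers; i++)
		assert(n->signalers[i]->completed);
}

int main(void)
{
	struct node a = { .completed = true };
	struct node b = { .completed = true,
			  .signalers = { &a }, .nr_signalers = 1 };

	node_fini(&b);	/* passes: a signaled before b was retired */
	printf("invariant holds\n");
	return 0;
}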

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: MichaƂ Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180102151235.3949-9-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/intel_lrc.c

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d575109f7a7fe2fb09da5cb3b20037621d0ddc0a..72bdc203716fb9b8b62a63c8f81fe8f05760f474 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -161,12 +161,16 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 
        GEM_BUG_ON(!list_empty(&pt->link));
 
-       /* Everyone we depended upon (the fences we wait to be signaled)
+       /*
+        * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+               GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+               GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
@@ -174,6 +178,9 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 
        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+               GEM_BUG_ON(dep->signaler != pt);
+               GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
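
Both loops above delete (and possibly free) the entry they are standing
on, which is why the _safe iterator variant is used: the next pointer
must be fetched before the current entry is removed. A minimal userspace
model of that pattern (the kernel's list_for_each_entry_safe() in
<linux/list.h> does the same caching for intrusive lists; this sketch is
illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct dep {
	struct dep *next;
};

int main(void)
{
	struct dep *head = NULL, *d, *next;

	/* Build a three-element list. */
	for (int i = 0; i < 3; i++) {
		d = malloc(sizeof(*d));
		d->next = head;
		head = d;
	}

	/* Safe walk: fetch ->next before freeing the current node,
	 * exactly what the _safe iterator caches for us. */
	for (d = head; d; d = next) {
		next = d->next;
		free(d);
	}

	printf("torn down without touching freed memory\n");
	return 0;
}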
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 04ee289d6cab467ff7119de4efc61c153b745a5b..6c607f8dbf92333513d4c2a804934adb3e52bcd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -329,6 +329,14 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
        return __i915_gem_request_completed(req, seqno);
 }
 
+static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+{
+       const struct drm_i915_gem_request *rq =
+               container_of(pt, const struct drm_i915_gem_request, priotree);
+
+       return i915_gem_request_completed(rq);
+}
+
 /* We treat requests as fences. This is not to be confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
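
The new helper recovers the enclosing request from its embedded priotree
via container_of(). A self-contained userspace illustration of that
pattern (the kernel provides the real macro in <linux/kernel.h>; the
*_model names below are invented):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct priotree_model { int unused; };

struct request_model {
	bool completed;
	struct priotree_model priotree;	/* embedded, as in the real struct */
};

static bool model_signaled(struct priotree_model *pt)
{
	/* Step from the embedded member back to its container. */
	struct request_model *rq =
		container_of(pt, struct request_model, priotree);

	return rq->completed;
}

int main(void)
{
	struct request_model rq = { .completed = true };

	assert(model_signaled(&rq.priotree));
	return 0;
}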
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e114776b883671078dd21f0075121b0df58d518d..04c35e4dd7c6496056897ed8dfe544a38af12837 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1037,7 +1037,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                 * engines.
                 */
                list_for_each_entry(p, &pt->signalers_list, signal_link) {
-                       if (i915_gem_request_completed(pt_to_request(p->signaler)))
+                       if (i915_priotree_signaled(p->signaler))
                                continue;
 
                        GEM_BUG_ON(p->signaler->priority < pt->priority);