drm/i915/vma: Move the bind_count vs pin_count assertion to a helper
author     Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 5 Jun 2018 09:41:07 +0000 (10:41 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 5 Jun 2018 14:16:07 +0000 (15:16 +0100)
To spare ourselves a long line later, refactor the repeated check of
bind_count vs pin_count to a helper.
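
For reference, the invariant asserted in both places is that every binding of
an object into an address space holds a pin on the object's backing pages, so
bind_count can never exceed mm.pages_pin_count. A minimal standalone sketch of
that invariant (illustrative only; the toy_* names below are not i915 code and
only approximate the driver's bind/unbind ordering):

#include <assert.h>

/*
 * Toy object carrying just the two counters the assertion relates
 * (a stand-in for drm_i915_gem_object).
 */
struct toy_object {
        int pages_pin_count;    /* pins held on the backing pages */
        int bind_count;         /* bindings into an address space */
};

/* Stand-in for assert_bind_count(): bindings never outnumber page pins. */
static void toy_assert_bind_count(const struct toy_object *obj)
{
        assert(obj->pages_pin_count >= obj->bind_count);
}

int main(void)
{
        struct toy_object obj = { 0, 0 };

        obj.pages_pin_count++; /* pin the pages before binding ... */
        obj.bind_count++;      /* ... then record the binding */
        toy_assert_bind_count(&obj);

        obj.bind_count--;      /* drop the binding first ... */
        obj.pages_pin_count--; /* ... then release its page pin */
        toy_assert_bind_count(&obj);
        return 0;
}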

v2: Fix up the commentary!

Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180605094107.31367-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_vma.c

index 9324d476e0a7c356b39cb02374e904a2b0a95262..10bf654cd0235daf4b0a9c02ab4a8b7f0f0e222c 100644
@@ -459,6 +459,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
        return true;
 }
 
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+       /*
+        * Combine the assertion that the object is bound and that we have
+        * pinned its pages. But we should never have bound the object
+        * more than we have pinned its pages. (For complete accuracy, we
+        * assume that no one else is pinning the pages, but as a rough assertion
+        * that we will not run into problems later, this will do!)
+        */
+       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 /**
  * i915_vma_insert - finds a slot for the vma in its address space
  * @vma: the vma
@@ -595,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        obj->bind_count++;
        spin_unlock(&dev_priv->mm.obj_lock);
 
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       assert_bind_count(obj);
 
        return 0;
 
@@ -633,7 +645,7 @@ i915_vma_remove(struct i915_vma *vma)
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       assert_bind_count(obj);
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,