drm/i915: Use struct vma_resource instead of struct vma_snapshot
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b86666f653ca4c13a5d5a06297895c3fc91a4130..9d859b0a3fbe101495a3b29cf9317307e5c3c7f0 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -288,7 +288,6 @@ struct i915_vma_work {
        struct i915_vma_resource *vma_res;
        struct drm_i915_gem_object *pinned;
        struct i915_sw_dma_fence_cb cb;
-       struct i915_refct_sgt *rsgt;
        enum i915_cache_level cache_level;
        unsigned int flags;
 };
@@ -314,8 +313,6 @@ static void __vma_release(struct dma_fence_work *work)
        i915_vm_put(vw->vm);
        if (vw->vma_res)
                i915_vma_resource_put(vw->vma_res);
-       if (vw->rsgt)
-               i915_refct_sgt_put(vw->rsgt);
 }
 
 static const struct dma_fence_work_ops bind_ops = {
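
With this patch the bind worker no longer holds its own reference on the object's
refcounted sg-table; that reference is owned by the vma_resource for the lifetime
of the binding, which is why both the rsgt member and the put in __vma_release()
go away. A minimal sketch of the bind-info fields this implies; the field names
are inferred from the call sites in this patch, and the exact layout in
i915_vma_resource.h may differ:

	struct i915_vma_resource {
		/* Bind-related information, valid while the vma is bound. */
		struct {
			struct sg_table *pages;
			struct i915_page_sizes page_sizes;
			/* Refcounted sg-table keeping the backing store alive. */
			struct i915_refct_sgt *pages_rsgt;
			bool readonly:1;
			bool lmem:1;
		} bi;
		/* Remaining members omitted. */
	};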
@@ -386,8 +383,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
        struct drm_i915_gem_object *obj = vma->obj;
 
        i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
-                              i915_gem_object_is_readonly(obj),
-                              i915_gem_object_is_lmem(obj),
+                              obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+                              i915_gem_object_is_lmem(obj), obj->mm.region,
                               vma->ops, vma->private, vma->node.start,
                               vma->node.size, vma->size);
 }
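
The updated call above implies the following i915_vma_resource_init() prototype,
gaining a struct i915_refct_sgt * and a struct intel_memory_region * argument;
the parameter names are assumptions read off the arguments passed here:

	void i915_vma_resource_init(struct i915_vma_resource *vma_res,
				    struct i915_address_space *vm,
				    struct sg_table *pages,
				    const struct i915_page_sizes *page_sizes,
				    struct i915_refct_sgt *pages_rsgt,
				    bool readonly,
				    bool lmem,
				    struct intel_memory_region *mr,
				    const struct i915_vma_ops *ops,
				    void *private,
				    u64 start,
				    u64 node_size,
				    u64 size);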
@@ -478,8 +475,6 @@ int i915_vma_bind(struct i915_vma *vma,
                work->vma_res = i915_vma_resource_get(vma->resource);
                work->cache_level = cache_level;
                work->flags = bind_flags;
-               if (vma->obj->mm.rsgt)
-                       work->rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
 
                /*
                 * Note we only want to chain up to the migration fence on
@@ -505,7 +500,7 @@ int i915_vma_bind(struct i915_vma *vma,
                 * on the object to avoid waiting for the async bind to
                 * complete in the object destruction path.
                 */
-               if (!work->rsgt)
+               if (!work->vma_res->bi.pages_rsgt)
                        work->pinned = i915_gem_object_get(vma->obj);
        } else {
                if (vma->obj) {
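
Because the vma_resource now owns the rsgt reference, the backing store is
already kept alive until the resource is released, so the worker only needs to
pin the object in the fallback case where no refcounted sg-table exists. A
hypothetical release-side counterpart, shown only to illustrate the ownership
transfer (the real put lives in i915_vma_resource.c and the helper name here is
an assumption):

	static void i915_vma_resource_put_rsgt(struct i915_vma_resource *vma_res)
	{
		/* Drop the backing-store reference taken at resource init time. */
		if (vma_res->bi.pages_rsgt)
			i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	}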
@@ -1771,7 +1766,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
        GEM_BUG_ON(i915_vma_has_userfault(vma));
 
        /* Object backend must be async capable. */
-       GEM_WARN_ON(async && !vma->obj->mm.rsgt);
+       GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
 
        /* If vm is not open, unbind is a nop. */
        vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
@@ -1784,9 +1779,6 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);
 
-       /* Object backend must be async capable. */
-       GEM_WARN_ON(async && !vma->obj->mm.rsgt);
-
        i915_vma_detach(vma);
 
        if (!async && unbind_fence) {
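
The two hunks above make __i915_vma_evict() consult the vma_resource rather
than the GEM object when deciding whether asynchronous unbind is possible, and
drop the duplicated warning so the check appears only once, before the vma is
detached. A minimal sketch of the invariant the check encodes, using the names
from this diff (the helper itself is hypothetical):

	/* Async unbind is only safe when the binding pins its own backing store. */
	static bool i915_vma_resource_async_capable(const struct i915_vma_resource *vma_res)
	{
		return vma_res->bi.pages_rsgt != NULL;
	}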