struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *pinned;
struct i915_sw_dma_fence_cb cb;
- struct i915_refct_sgt *rsgt;
enum i915_cache_level cache_level;
unsigned int flags;
};
i915_vm_put(vw->vm);
if (vw->vma_res)
i915_vma_resource_put(vw->vma_res);
- if (vw->rsgt)
- i915_refct_sgt_put(vw->rsgt);
}
static const struct dma_fence_work_ops bind_ops = {
struct drm_i915_gem_object *obj = vma->obj;
i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
- i915_gem_object_is_readonly(obj),
- i915_gem_object_is_lmem(obj),
+ obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+ i915_gem_object_is_lmem(obj), obj->mm.region,
vma->ops, vma->private, vma->node.start,
vma->node.size, vma->size);
}
work->vma_res = i915_vma_resource_get(vma->resource);
work->cache_level = cache_level;
work->flags = bind_flags;
- if (vma->obj->mm.rsgt)
- work->rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
/*
* Note we only want to chain up to the migration fence on
* the object to avoid waiting for the async bind to
* complete in the object destruction path.
*/
- if (!work->rsgt)
+ if (!work->vma_res->bi.pages_rsgt)
work->pinned = i915_gem_object_get(vma->obj);
} else {
if (vma->obj) {
GEM_BUG_ON(i915_vma_has_userfault(vma));
/* Object backend must be async capable. */
- GEM_WARN_ON(async && !vma->obj->mm.rsgt);
+ GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
- /* Object backend must be async capable. */
- GEM_WARN_ON(async && !vma->obj->mm.rsgt);
-
i915_vma_detach(vma);
if (!async && unbind_fence) {