void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma;
+ struct i915_vma *vma, *vn;
+ int open;
+
+ mutex_lock(&ggtt->vm.mutex);
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ __i915_vma_evict(vma);
+ drm_mm_remove_node(&vma->node);
+ }
+ }
+
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
+ atomic_set(&ggtt->vm.open, open);
+
+ mutex_unlock(&ggtt->vm.mutex);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
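The suspend path now does its own locking and eviction: it takes ggtt->vm.mutex, walks the bound list with the _safe iterator (eviction unlinks the vma from that list), skips pinned vmas, and fully evicts any unpinned vma that is not bound with I915_VMA_GLOBAL_BIND, returning its node to the drm_mm. The atomic_xchg() saves and zeroes ggtt->vm.open in one step so the unbind paths skip rewriting individual PTEs; the whole range is cleared once by clear_range() and the saved value is restored before the mutex is dropped. A minimal userspace sketch of that save/close/restore pattern (illustrative names only, not driver code):

/* gate.c: the vm.open gate from the hunk above, reduced to C11. */
#include <stdatomic.h>
#include <stdio.h>

#define N_PTES 4

struct toy_vm {
	atomic_int open;		/* nonzero: unbind rewrites PTEs */
	int pte[N_PTES];
};

static void toy_evict(struct toy_vm *vm, int idx)
{
	if (atomic_load(&vm->open))	/* skipped while the gate is shut */
		vm->pte[idx] = 0;
}

static void toy_suspend(struct toy_vm *vm)
{
	/* Save the old value and close the gate in one atomic step. */
	int open = atomic_exchange(&vm->open, 0);
	int i;

	for (i = 0; i < N_PTES; i++)
		toy_evict(vm, i);	/* no per-entry PTE writes */

	for (i = 0; i < N_PTES; i++)
		vm->pte[i] = 0;		/* one bulk clear instead */

	atomic_store(&vm->open, open);	/* reopen */
}

int main(void)
{
	struct toy_vm vm = { .pte = { 1, 2, 3, 4 } };

	atomic_init(&vm.open, 1);
	toy_suspend(&vm);
	printf("open=%d pte[0]=%d\n", atomic_load(&vm.open), vm.pte[0]);
	return 0;
}

The eviction itself is made possible by splitting __i915_vma_unbind() in i915_vma.c, so that the teardown work is callable on its own: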
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
{
- int ret;
-
- lockdep_assert_held(&vma->vm->mutex);
-
- if (i915_vma_is_pinned(vma)) {
- vma_print_allocator(vma, "is pinned");
- return -EAGAIN;
- }
-
- /*
- * After confirming that no one else is pinning this vma, wait for
- * any laggards who may have crept in during the wait (through
- * a residual pin skipping the vm->mutex) to complete.
- */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
GEM_BUG_ON(i915_vma_is_pinned(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
/* Force a pagefault for domain tracking on next user access */
i915_vma_revoke_mmap(vma);
}

i915_vma_detach(vma);
vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+ int ret;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ __i915_vma_evict(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
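The split separates the unconditional teardown from the checked entry point: __i915_vma_evict() assumes the caller holds vm->mutex and has ruled out pins, while __i915_vma_unbind() keeps the pin check, the residual-pin sync and the node bookkeeping, and then calls the helper. Note that the node-allocated test now runs before the pin check, so a vma that is pinned but no longer has a node reports success rather than -EAGAIN. A toy model of the resulting contract (illustrative names, not driver code):

/* split.c: raw helper vs. checked wrapper, as in the hunk above. */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vma {
	bool mutex_held;	/* stands in for vma->vm->mutex */
	bool pinned;
	bool bound;		/* pages mapped into the VM */
	bool node_allocated;	/* address range still reserved */
};

/* Unconditional teardown: preconditions are asserted, never handled. */
static void toy_evict(struct toy_vma *vma)
{
	assert(vma->mutex_held);	/* lockdep_assert_held() analogue */
	assert(!vma->pinned);		/* GEM_BUG_ON() analogue */
	vma->bound = false;		/* the node itself is untouched */
}

/* Checked wrapper: validates, evicts, then releases the node. */
static int toy_unbind(struct toy_vma *vma)
{
	assert(vma->mutex_held);

	if (!vma->node_allocated)	/* checked first, as in the patch */
		return 0;
	if (vma->pinned)
		return -EAGAIN;

	toy_evict(vma);
	vma->node_allocated = false;	/* drm_mm_remove_node() analogue */
	return 0;
}

int main(void)
{
	struct toy_vma vma = {
		.mutex_held = true, .bound = true, .node_allocated = true,
	};

	printf("unbind=%d bound=%d node=%d\n", toy_unbind(&vma),
	       vma.bound, vma.node_allocated);
	return 0;
}

The suspend hunk relies on exactly this layering: it performs its own pin and bind checks under the mutex and then calls __i915_vma_evict() plus drm_mm_remove_node() directly. The locked wrapper i915_vma_unbind() is adjusted to match; the hunk below picks up at its local declarations: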
intel_wakeref_t wakeref = 0;
int err;
- if (!drm_mm_node_allocated(&vma->node))
- return 0;
-
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma);
if (err)
- goto out_rpm;
+ return err;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
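Two things change here: the fast path for an already-unbound vma moves after the optimistic i915_vma_sync() wait, and the wait's failure case becomes a plain return instead of "goto out_rpm", since at that point no wakeref has been taken and there is nothing to unwind. A reduced model of that error-path rule (bare returns before acquisition, unwind labels after; names are illustrative):

/* unwind.c: early return vs. goto-unwind, as in the hunk above. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool rpm_held;

static int toy_vma_unbind(int sync_err, bool node_allocated, int lock_err)
{
	int err;

	err = sync_err;		/* optimistic wait: nothing held yet */
	if (err)
		return err;	/* was "goto out_rpm" before the patch */

	if (!node_allocated)
		return 0;	/* already unbound */

	rpm_held = true;	/* wakeref acquired: unwind from here on */

	err = lock_err;		/* e.g. a mutex_lock_interruptible() */
	if (err)
		goto out_rpm;

	/* ... locked unbind work would run here ... */
	err = 0;
out_rpm:
	rpm_held = false;	/* release on every exit past this point */
	return err;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_vma_unbind(-EINTR, true, 0),	 /* early return */
	       toy_vma_unbind(0, false, 0),	 /* fast path */
	       toy_vma_unbind(0, true, -EINTR)); /* unwind path */
	return 0;
}

Finally, i915_vma.h exports the new helper alongside the existing unbind entry points: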
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
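The declarations spell out the layering: the double-underscore functions require the caller to hold vma->vm->mutex, while i915_vma_unbind() is the self-locking, __must_check entry point. A caller-side sketch of the assumed usage (not code from this patch):

/* Under vm->mutex already, e.g. inside an eviction loop: */
static int example_unbind_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);
	return __i915_vma_unbind(vma);	/* may return -EAGAIN if pinned */
}

/* Unlocked caller: let i915_vma_unbind() lock, wait and unwind itself. */
static int example_unbind(struct i915_vma *vma)
{
	return i915_vma_unbind(vma);	/* __must_check */
}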